"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
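# --- Editor's illustration (not part of the original module) ---
# A minimal sketch of the lazy-import pattern used above: attribute access on the
# module triggers the real submodule import the first time a name is requested, so
# importing the package stays cheap even when torch/tf/flax are installed.
# `ToyLazyModule` is a hypothetical stand-in, not transformers' actual `_LazyModule` API.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [exported names]} into {exported name: submodule}
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache: __getattr__ only fires for missing attributes
        return value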
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
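# --- Editor's illustration (not part of the original metric file) ---
# A hedged usage sketch of the transposition `_compute` performs: `datasets` takes
# one reference sub-list per prediction, while sacrebleu's `corpus_score` expects
# one list per reference *stream*. The example strings are made up.
if __name__ == "__main__":
    predictions = ["The cat sat on the mat."]
    references = [["The cat is sitting on the mat."]]  # one sub-list per prediction
    transposed = [[refs[i] for refs in references] for i in range(len(references[0]))]
    chrf_pp = CHRF(word_order=2)  # word_order=2 turns chrF into chrF++
    print(chrf_pp.corpus_score(predictions, transposed).score)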
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
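# --- Editor's illustration (not part of the original test file) ---
# A quick demo of the behaviour the two "missing column" tests assert: the first
# record fixes the schema, and later records are padded with None / extra keys dropped.
if __name__ == "__main__":
    demo = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    print(demo[0], demo[1])  # {'col_1': 1} {'col_1': None}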
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
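

# --- Editor's illustration (not part of the original test file) ---
# The module under test is not included in this dump. Below is a *hypothetical*
# `send_file` sketch that would satisfy the call sequence asserted above
# (bind/listen/accept, one recv for the request, one send because the mocked read
# yields a single truthy chunk, then close/shutdown); the real
# file_transfer.send_file implementation may differ.
import socket


def send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    port = 12312  # hypothetical port
    sock = socket.socket()
    sock.bind(("localhost", port))
    sock.listen(5)
    conn, _addr = sock.accept()
    conn.recv(1024)  # wait for the client's request
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(1)
    sock.close()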
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor([[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
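# --- Editor's illustration (not part of the original test file) ---
# The sequence-length arithmetic the tester above relies on: ViT flattens the
# image into (image_size // patch_size) ** 2 patches and prepends one [CLS] token.
def vit_seq_length(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1  # +1 for the [CLS] token


assert vit_seq_length(30, 2) == 226    # the tester's toy config
assert vit_seq_length(224, 16) == 197  # google/vit-base-patch16-224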
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
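# --- Editor's illustration (not part of the original processor file) ---
# A hedged usage sketch: the processor routes text to the tokenizer and images to
# the image processor, then merges both encodings (see `encoding.update` above).
# "BridgeTower/bridgetower-base" is assumed to be a published checkpoint id; the
# local image path is made up.
# from PIL import Image
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# inputs = processor(images=Image.open("cat.png"), text="a photo of a cat", return_tensors="pt")
# # inputs -> input_ids / attention_mask plus pixel_values / pixel_mask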
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl"):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt"):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path)
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner


def finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt"):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument("--data_file", type=str, default=None, help=("A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset."))
    parser.add_argument("--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.")
    parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.")
    parser.add_argument("--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument("--context_len", default=32, type=int, help=("The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded."))
    parser.add_argument("--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set")
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument("--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner")
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument("--eval_interval", default=10, type=int, help=("decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches"))
    parser.add_argument("--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument("--threshold", default=1.0, type=float, help=("The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model"))
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument("--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration")
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl")
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt")
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True)
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt")
if __name__ == "__main__":
main()
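

# --- Editor's illustration (not part of the original script) ---
# A distilled sketch of the IGF batch filter inside `finetune` above: a context is
# kept for backprop only if the secondary learner's predicted information gain
# clears the threshold, and the threshold is dropped to -1 ("accept everything")
# once global_step reaches 10.
def keep_context(predicted_q: float, global_step: int, threshold: float = 1.0) -> bool:
    if global_step >= 10:
        threshold = -1.0  # decayed filter
    return predicted_q >= threshold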
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
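# --- Editor's illustration (not part of the original test file) ---
# How the toy vocab/merges written in `setUp` tokenize "Ġlower" ("\u0120" marks a
# leading space in byte-level BPE): split into characters, then repeatedly apply
# the lowest-ranked merge until none applies, giving "Ġlow" + "er".
def toy_bpe(word, merges):
    symbols = list(word)
    ranks = {tuple(m.split()): i for i, m in enumerate(merges) if m and not m.startswith("#")}
    while True:
        pairs = [(ranks.get(pair, float("inf")), i) for i, pair in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs, default=(float("inf"), -1))
        if best_rank == float("inf"):
            break
        symbols[i : i + 2] = ["".join(symbols[i : i + 2])]
    return symbols


assert toy_bpe("\u0120lower", ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]) == ["\u0120low", "er"]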
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14)
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0)
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference)

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference)
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)
        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
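# --- Editor's illustration (not part of the original test file) ---
# The jit-vs-eager pattern `test_jit_compilation` uses above: the same traced
# function must yield identically shaped outputs with JIT enabled and when run
# eagerly under `jax.disable_jit()`.
import jax
import jax.numpy as jnp


@jax.jit
def scaled_sum(x):
    return (2.0 * x).sum(axis=-1)


x = jnp.ones((1, 3))
jitted = scaled_sum(x)
with jax.disable_jit():
    eager = scaled_sum(x)
assert jitted.shape == eager.shape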
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def test_config(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class TFCvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
__lowerCAmelCase : Optional[int] = TFCvtForImageClassification(_snake_case )
__lowerCAmelCase : str = model(_snake_case , labels=_snake_case , training=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Tuple )->str:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class snake_case_ ( __lowercase ,__lowercase ,unittest.TestCase ):
A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
A_ = (
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def UpperCAmelCase__ ( self : List[str] )->str:
'''simple docstring'''
__lowerCAmelCase : Tuple = TFCvtModelTester(self )
__lowerCAmelCase : Optional[Any] = TFCvtConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] )->Optional[int]:
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def UpperCAmelCase__ ( self : str )->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : Union[str, Any] )->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : Tuple )->Optional[int]:
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def UpperCAmelCase__ ( self : Dict )->Any:
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase__ ( self : Dict )->Dict:
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def UpperCAmelCase__ ( self : Union[str, Any] )->str:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(_snake_case )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def UpperCAmelCase__ ( self : Tuple )->Tuple:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(_snake_case )
__lowerCAmelCase : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : int = [*signature.parameters.keys()]
__lowerCAmelCase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _snake_case )
def UpperCAmelCase__ ( self : int )->List[str]:
'''simple docstring'''
def check_hidden_states_output(_snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] ):
__lowerCAmelCase : Any = model_class(_snake_case )
__lowerCAmelCase : Any = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__lowerCAmelCase : Optional[Any] = outputs.hidden_states
__lowerCAmelCase : Tuple = len(self.model_tester.depth )
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : str = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase : Optional[Any] = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def UpperCAmelCase__ ( self : str )->List[str]:
'''simple docstring'''
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase__ ( self : Dict )->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase__ ( self : Dict )->Union[str, Any]:
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : List[Any] = TFCvtModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
__lowerCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class snake_case_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Dict )->List[Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCAmelCase__ ( self : List[str] )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Any = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__lowerCAmelCase : List[Any] = self.default_image_processor
__lowerCAmelCase : Optional[int] = prepare_img()
__lowerCAmelCase : int = image_processor(images=_snake_case , return_tensors="""tf""" )
# forward pass
__lowerCAmelCase : Dict = model(**_snake_case )
# verify the logits
__lowerCAmelCase : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _snake_case )
__lowerCAmelCase : Any = tf.constant([0.9_285, 0.9_015, -0.3_150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _snake_case , atol=1E-4 ) ) | 232 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
__magic_name__ : int = argparse.ArgumentParser()
parser.add_argument("--model_ckpt" , type=_snake_case , default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs" , type=_snake_case , default=5 )
parser.add_argument("--batch_size" , type=_snake_case , default=6 )
parser.add_argument("--gradient_accumulation_steps" , type=_snake_case , default=1 )
parser.add_argument("--freeze" , type=_snake_case , default=_snake_case )
parser.add_argument("--learning_rate" , type=_snake_case , default=5E-4 )
parser.add_argument("--seed" , type=_snake_case , default=0 )
parser.add_argument("--lr_scheduler_type" , type=_snake_case , default="cosine" )
parser.add_argument("--num_warmup_steps" , type=_snake_case , default=10 )
parser.add_argument("--weight_decay" , type=_snake_case , default=0.01 )
parser.add_argument("--output_dir" , type=_snake_case , default="./results" )
return parser.parse_args()
snake_case : Optional[int] = load("accuracy")
def lowerCAmelCase_ ( _snake_case : Tuple ) -> int:
'''simple docstring'''
__magic_name__ , __magic_name__ : Any = eval_pred
__magic_name__ : List[str] = np.argmax(_snake_case , axis=1 )
return metric.compute(predictions=_snake_case , references=_snake_case )
class _snake_case ( snake_case ):
def __init__( self , _a ):
super().__init__()
__magic_name__ : Tuple = trainer
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , **_a ):
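        # when an evaluation is scheduled, also evaluate on the training set so train metrics are logged each epoch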
if control.should_evaluate:
__magic_name__ : Any = deepcopy(_a )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def lowerCAmelCase_ ( ) -> Any:
'''simple docstring'''
__magic_name__ : Optional[Any] = get_args()
set_seed(args.seed )
__magic_name__ : Union[str, Any] = load_dataset("codeparrot/codecomplex" , split="train" )
__magic_name__ : Union[str, Any] = dataset.train_test_split(test_size=0.2 )
__magic_name__ : Tuple = train_test["test"].train_test_split(test_size=0.5 )
__magic_name__ : Optional[Any] = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
__magic_name__ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
__magic_name__ : Union[str, Any] = tokenizer.eos_token
__magic_name__ : str = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__magic_name__ : Tuple = model.config.eos_token_id
if args.freeze:
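        # freeze the backbone encoder so only the classification head is updated during training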
for param in model.roberta.parameters():
__magic_name__ : Dict = False
__magic_name__ : str = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(_snake_case : List[Any] ):
__magic_name__ : Dict = tokenizer(example["src"] , truncation=_snake_case , max_length=1024 )
        __magic_name__ : Union[str, Any] = labels.str2int(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__magic_name__ : Optional[int] = train_test_validation.map(
_snake_case , batched=_snake_case , remove_columns=train_test_validation["train"].column_names , )
__magic_name__ : str = DataCollatorWithPadding(tokenizer=_snake_case )
__magic_name__ : Any = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
__magic_name__ : Dict = Trainer(
model=_snake_case , args=_snake_case , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=_snake_case , data_collator=_snake_case , compute_metrics=_snake_case , )
print("Training..." )
trainer.add_callback(CustomCallback(_snake_case ) )
trainer.train()
if __name__ == "__main__":
main()
| 281 |
import math
def lowerCAmelCase_ ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
return math.pow(_snake_case , 2 ) - a
def lowerCAmelCase_ ( _snake_case : float ) -> float:
'''simple docstring'''
return 2 * x
def lowerCAmelCase_ ( _snake_case : float ) -> float:
'''simple docstring'''
__magic_name__ : Optional[int] = 2.0
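    # repeatedly square the candidate until it exceeds a, yielding a starting point above the true root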
while start <= a:
__magic_name__ : str = math.pow(_snake_case , 2 )
return start
def lowerCAmelCase_ ( _snake_case : float , _snake_case : int = 9999 , _snake_case : float = 0.00_000_000_000_001 ) -> float:
'''simple docstring'''
if a < 0:
raise ValueError("math domain error" )
__magic_name__ : Optional[int] = get_initial_point(_snake_case )
for _ in range(_snake_case ):
__magic_name__ : int = value
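        # Newton's update for f(x) = x^2 - a: x_{n+1} = x_n - f(x_n) / f'(x_n)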
__magic_name__ : str = value - fx(_snake_case , _snake_case ) / fx_derivative(_snake_case )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 281 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
lowercase_ :str = 'ZinengTang/tvlt-base'
lowercase_ :Tuple = tempfile.mkdtemp()
def UpperCamelCase ( self , **UpperCamelCase_ ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **UpperCamelCase_ )
def UpperCamelCase ( self , **UpperCamelCase_ ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **UpperCamelCase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
lowercase_ :str = self.get_image_processor()
lowercase_ :Optional[int] = self.get_feature_extractor()
lowercase_ :List[Any] = TvltProcessor(image_processor=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ :Dict = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , UpperCamelCase_ )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :int = self.get_image_processor()
lowercase_ :List[str] = self.get_feature_extractor()
lowercase_ :Optional[int] = TvltProcessor(image_processor=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
lowercase_ :List[str] = np.ones([1_2000] )
lowercase_ :int = feature_extractor(UpperCamelCase_ , return_tensors='''np''' )
lowercase_ :Optional[int] = processor(audio=UpperCamelCase_ , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase ( self ):
lowercase_ :List[Any] = self.get_image_processor()
lowercase_ :int = self.get_feature_extractor()
lowercase_ :Any = TvltProcessor(image_processor=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
lowercase_ :Tuple = np.ones([3, 224, 224] )
lowercase_ :Union[str, Any] = image_processor(UpperCamelCase_ , return_tensors='''np''' )
lowercase_ :str = processor(images=UpperCamelCase_ , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase ( self ):
lowercase_ :str = self.get_image_processor()
lowercase_ :Optional[Any] = self.get_feature_extractor()
lowercase_ :Any = TvltProcessor(image_processor=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
lowercase_ :List[Any] = np.ones([1_2000] )
lowercase_ :Any = np.ones([3, 224, 224] )
lowercase_ :Union[str, Any] = processor(audio=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def UpperCamelCase ( self ):
lowercase_ :Any = self.get_image_processor()
lowercase_ :Union[str, Any] = self.get_feature_extractor()
lowercase_ :int = TvltProcessor(image_processor=UpperCamelCase_ , feature_extractor=UpperCamelCase_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 359 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
SCREAMING_SNAKE_CASE : Dict = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCamelCase ( _a , _a=None ) -> Optional[int]:
'''simple docstring'''
require_version(deps[pkg] , _a )
| 252 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
if "cls_token" in name:
lowerCamelCase__ : List[str] = name.replace('cls_token' , 'vit.embeddings.cls_token' )
if "mask_token" in name:
lowerCamelCase__ : Optional[int] = name.replace('mask_token' , 'decoder.mask_token' )
if "decoder_pos_embed" in name:
lowerCamelCase__ : Optional[Any] = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase__ : Dict = name.replace('pos_embed' , 'vit.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowerCamelCase__ : Union[str, Any] = name.replace('patch_embed.proj' , 'vit.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCamelCase__ : int = name.replace('patch_embed.norm' , 'vit.embeddings.norm' )
if "decoder_blocks" in name:
lowerCamelCase__ : Optional[int] = name.replace('decoder_blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
lowerCamelCase__ : str = name.replace('blocks' , 'vit.encoder.layer' )
if "attn.proj" in name:
lowerCamelCase__ : Dict = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCamelCase__ : int = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCamelCase__ : Any = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCamelCase__ : int = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCamelCase__ : str = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCamelCase__ : int = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
lowerCamelCase__ : Dict = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
lowerCamelCase__ : Any = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
lowerCamelCase__ : List[Any] = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name:
lowerCamelCase__ : str = name.replace('norm.weight' , 'vit.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name:
lowerCamelCase__ : List[Any] = name.replace('norm.bias' , 'vit.layernorm.bias' )
return name
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : Tuple = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
lowerCamelCase__ : List[str] = key.split('.' )
lowerCamelCase__ : List[str] = int(key_split[1] )
if "decoder_blocks" in key:
lowerCamelCase__ : Any = config.decoder_hidden_size
lowerCamelCase__ : List[Any] = 'decoder.decoder_layers.'
if "weight" in key:
lowerCamelCase__ : int = val[:dim, :]
lowerCamelCase__ : Union[str, Any] = val[dim : dim * 2, :]
lowerCamelCase__ : Union[str, Any] = val[-dim:, :]
elif "bias" in key:
lowerCamelCase__ : List[str] = val[:dim]
lowerCamelCase__ : Optional[int] = val[dim : dim * 2]
lowerCamelCase__ : str = val[-dim:]
else:
lowerCamelCase__ : Any = config.hidden_size
lowerCamelCase__ : Dict = 'vit.encoder.layer.'
if "weight" in key:
lowerCamelCase__ : Optional[Any] = val[:dim, :]
lowerCamelCase__ : List[str] = val[dim : dim * 2, :]
lowerCamelCase__ : List[str] = val[-dim:, :]
elif "bias" in key:
lowerCamelCase__ : Dict = val[:dim]
lowerCamelCase__ : Union[str, Any] = val[dim : dim * 2]
lowerCamelCase__ : Optional[Any] = val[-dim:]
else:
lowerCamelCase__ : Dict = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
lowerCamelCase__ : Union[str, Any] = ViTMAEConfig()
if "large" in checkpoint_url:
lowerCamelCase__ : List[Any] = 1024
lowerCamelCase__ : str = 4096
lowerCamelCase__ : Optional[Any] = 24
lowerCamelCase__ : List[str] = 16
elif "huge" in checkpoint_url:
lowerCamelCase__ : Any = 14
lowerCamelCase__ : str = 1280
lowerCamelCase__ : Dict = 5120
lowerCamelCase__ : Tuple = 32
lowerCamelCase__ : List[Any] = 16
lowerCamelCase__ : List[Any] = ViTMAEForPreTraining(_UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' )['model']
lowerCamelCase__ : List[str] = ViTMAEImageProcessor(size=config.image_size )
lowerCamelCase__ : int = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
lowerCamelCase__ : Dict = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
lowerCamelCase__ : Union[str, Any] = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
lowerCamelCase__ : int = ViTMAEImageProcessor(size=config.image_size )
lowerCamelCase__ : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' )
# forward pass
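    # ViTMAE masks patches at random, so fix the seed to keep the verification logits reproducible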
torch.manual_seed(2 )
lowerCamelCase__ : str = model(**_UpperCAmelCase )
lowerCamelCase__ : Any = outputs.logits
if "large" in checkpoint_url:
lowerCamelCase__ : List[str] = torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
lowerCamelCase__ : Optional[Any] = torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
lowerCamelCase__ : Tuple = torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 50 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ ( enum.Enum ):
lowerCAmelCase__ : Dict = "all_checks"
lowerCAmelCase__ : List[Any] = "basic_checks"
lowerCAmelCase__ : Dict = "no_checks"
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[dict] , SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Optional[Any]:
if expected_checksums is None:
logger.info('Unable to verify checksums.' )
return
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise UnexpectedDownloadedFile(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
__lowercase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
__lowercase = ' for ' + verification_name if verification_name is not None else ''
if len(SCREAMING_SNAKE_CASE ) > 0:
raise NonMatchingChecksumError(
F"""Checksums didn't match{for_verification_name}:\n"""
F"""{bad_urls}\n"""
'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
logger.info('All the checksums matched successfully' + for_verification_name )
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
class A__ ( lowerCAmelCase__ ):
pass
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[dict] , SCREAMING_SNAKE_CASE : dict ) -> Optional[int]:
if expected_splits is None:
logger.info('Unable to verify splits sizes.' )
return
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise ExpectedMoreSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
raise UnexpectedSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
__lowercase = [
{'expected': expected_splits[name], 'recorded': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise NonMatchingSplitsSizesError(str(SCREAMING_SNAKE_CASE ) )
logger.info('All the splits matched successfully.' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool = True ) -> dict:
if record_checksum:
        __lowercase = sha256()
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
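            # hash the file in 1 MiB chunks so large files are never loaded fully into memory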
for chunk in iter(lambda: f.read(1 << 20 ) , b'' ):
m.update(SCREAMING_SNAKE_CASE )
__lowercase = m.hexdigest()
else:
__lowercase = None
return {"num_bytes": os.path.getsize(SCREAMING_SNAKE_CASE ), "checksum": checksum}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 325 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def _lowerCAmelCase ( A__: int ):
'''simple docstring'''
UpperCAmelCase = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
F"""{test_file} instead.""" )
UpperCAmelCase = components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
UpperCAmelCase = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
UpperCAmelCase = '''.'''.join(A__ )
return test_module_path
def _lowerCAmelCase ( A__: int ):
'''simple docstring'''
UpperCAmelCase = get_module_path(A__ )
UpperCAmelCase = importlib.import_module(A__ )
return test_module
def _lowerCAmelCase ( A__: Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = get_test_module(A__ )
for attr in dir(A__ ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(A__ , A__ ) )
# sort with class names
return sorted(A__ , key=lambda A__ : x.__name__ )
def _lowerCAmelCase ( A__: List[Any] ):
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = get_test_module(A__ )
for attr in dir(A__ ):
UpperCAmelCase = getattr(A__ , A__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
UpperCAmelCase = getattr(A__ , '''all_model_classes''' , [] )
if len(A__ ) > 0:
test_classes.append(A__ )
# sort with class names
return sorted(A__ , key=lambda A__ : x.__name__ )
def _lowerCAmelCase ( A__: List[str] ):
'''simple docstring'''
UpperCAmelCase = get_test_classes(A__ )
UpperCAmelCase = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(A__ , key=lambda A__ : x.__name__ )
def _lowerCAmelCase ( A__: Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = test_class()
if hasattr(A__ , '''setUp''' ):
test.setUp()
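    # setUp typically instantiates the model_tester attribute that is read below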
UpperCAmelCase = None
if hasattr(A__ , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
UpperCAmelCase = test.model_tester.__class__
return model_tester
def _lowerCAmelCase ( A__: str , A__: Optional[int] ):
'''simple docstring'''
UpperCAmelCase = get_test_classes(A__ )
UpperCAmelCase = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(A__ )
# sort with class names
return sorted(A__ , key=lambda A__ : x.__name__ )
def _lowerCAmelCase ( A__: Union[str, Any] , A__: Dict ):
'''simple docstring'''
UpperCAmelCase = get_test_classes_for_model(A__ , A__ )
UpperCAmelCase = []
for test_class in test_classes:
UpperCAmelCase = get_model_tester_from_test_class(A__ )
if tester_class is not None:
tester_classes.append(A__ )
# sort with class names
return sorted(A__ , key=lambda A__ : x.__name__ )
def _lowerCAmelCase ( A__: Any ):
'''simple docstring'''
UpperCAmelCase = get_test_classes(A__ )
UpperCAmelCase = {test_class: get_model_tester_from_test_class(A__ ) for test_class in test_classes}
return test_tester_mapping
def _lowerCAmelCase ( A__: Optional[int] ):
'''simple docstring'''
UpperCAmelCase = get_model_classes(A__ )
UpperCAmelCase = {
model_class: get_test_classes_for_model(A__ , A__ ) for model_class in model_classes
}
return model_test_mapping
def _lowerCAmelCase ( A__: Dict ):
'''simple docstring'''
UpperCAmelCase = get_model_classes(A__ )
UpperCAmelCase = {
model_class: get_tester_classes_for_model(A__ , A__ ) for model_class in model_classes
}
return model_to_tester_mapping
def _lowerCAmelCase ( A__: List[Any] ):
'''simple docstring'''
if isinstance(A__ , A__ ):
return o
elif isinstance(A__ , A__ ):
return o.__name__
elif isinstance(A__ , (list, tuple) ):
return [to_json(A__ ) for x in o]
elif isinstance(A__ , A__ ):
return {to_json(A__ ): to_json(A__ ) for k, v in o.items()}
else:
return o
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152 | 0 |
from datetime import datetime
import requests
def _lowercase ( UpperCamelCase_ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
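    # the endpoint responds with a JSON list whose first entry holds the direct video source URL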
SCREAMING_SNAKE_CASE__ = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(SCREAMING_SNAKE_CASE__ ).content
if __name__ == "__main__":
__snake_case = input("""Enter Video/IGTV url: """).strip()
__snake_case = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 176 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A :
UpperCamelCase__ : Union[str, Any] =XGLMConfig
UpperCamelCase__ : Dict ={}
UpperCamelCase__ : Tuple ='gelu'
def __init__( self : List[Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any]=14 , lowercase_ : Dict=7 , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : Any=True , lowercase_ : Optional[int]=99 , lowercase_ : List[Any]=32 , lowercase_ : List[Any]=2 , lowercase_ : Dict=4 , lowercase_ : List[str]=37 , lowercase_ : int="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[str]=512 , lowercase_ : Union[str, Any]=0.02 , ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Dict =parent
_lowerCamelCase : Optional[Any] =batch_size
_lowerCamelCase : Optional[int] =seq_length
_lowerCamelCase : Union[str, Any] =is_training
_lowerCamelCase : Tuple =use_input_mask
_lowerCamelCase : str =use_labels
_lowerCamelCase : Any =vocab_size
_lowerCamelCase : List[str] =d_model
_lowerCamelCase : List[Any] =num_hidden_layers
_lowerCamelCase : Union[str, Any] =num_attention_heads
_lowerCamelCase : List[Any] =ffn_dim
_lowerCamelCase : Optional[Any] =activation_function
_lowerCamelCase : Dict =activation_dropout
_lowerCamelCase : Tuple =attention_dropout
_lowerCamelCase : List[str] =max_position_embeddings
_lowerCamelCase : int =initializer_range
_lowerCamelCase : Optional[int] =None
_lowerCamelCase : Optional[Any] =0
_lowerCamelCase : List[str] =2
_lowerCamelCase : Any =1
def lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def lowerCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCamelCase : Any =None
if self.use_input_mask:
_lowerCamelCase : str =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] =self.get_config()
_lowerCamelCase : Optional[Any] =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowercase_ , )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_lowerCamelCase : str =self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any =config_and_inputs
_lowerCamelCase : Union[str, Any] ={
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class A ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase__ : List[str] =(TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase__ : Any =(
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase__ : str =False
UpperCamelCase__ : int =False
UpperCamelCase__ : int =False
def lowerCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple =TFXGLMModelTester(self )
_lowerCamelCase : str =ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def lowerCamelCase ( self : str ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def lowerCamelCase ( self : Any ) -> int:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int =TFXGLMModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class A ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : str , lowercase_ : str=True ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Any =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        _lowerCamelCase : List[Any] =tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCamelCase : int =[2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
_lowerCamelCase : Dict =model.generate(lowercase_ , do_sample=lowercase_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowercase_ )
@slow
def lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_lowerCamelCase : Any =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
_lowerCamelCase : Tuple =tokenizer('Today is a nice day and' , return_tensors='tf' )
_lowerCamelCase : Optional[int] =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
_lowerCamelCase : List[Any] =model.generate(lowercase_ , do_sample=lowercase_ , seed=[7, 0] )
_lowerCamelCase : Union[str, Any] =tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase_ )
_lowerCamelCase : Union[str, Any] =(
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowercase_ , lowercase_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : int =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_lowerCamelCase : Any =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
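        # decoder-only models must be left-padded so generation continues from the last real token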
_lowerCamelCase : Optional[Any] ='left'
# use different length sentences to test batching
_lowerCamelCase : int =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
_lowerCamelCase : List[Any] =tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
_lowerCamelCase : int =inputs['input_ids']
_lowerCamelCase : str =model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
_lowerCamelCase : Optional[Any] =tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_lowerCamelCase : List[str] =model.generate(input_ids=lowercase_ , max_new_tokens=12 )
_lowerCamelCase : Tuple =tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_lowerCamelCase : Dict =model.generate(input_ids=lowercase_ , max_new_tokens=12 )
_lowerCamelCase : str =tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
_lowerCamelCase : str =tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
_lowerCamelCase : int =tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
_lowerCamelCase : List[str] =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
| 199 | 0 |
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Any = [1]
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = 0, 0, 0
lowerCamelCase__ : Optional[Any] = ugly_nums[ia] * 2
lowerCamelCase__ : Any = ugly_nums[ia] * 3
lowerCamelCase__ : int = ugly_nums[ia] * 5
for _ in range(1 , _lowerCamelCase ):
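        # the next ugly number is the smallest pending multiple of 2, 3 or 5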
lowerCamelCase__ : List[str] = min(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
ugly_nums.append(_lowerCamelCase )
if next_num == next_a:
ia += 1
lowerCamelCase__ : Optional[Any] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
lowerCamelCase__ : List[str] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
lowerCamelCase__ : Optional[int] = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"{ugly_numbers(2_00) = }")
| 363 |
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : Optional[Any] = TreeNode(int(_lowerCamelCase ) )
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
lowerCamelCase__ : Dict = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : str = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Dict = left_node
q.put(_lowerCamelCase )
lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
lowerCamelCase__ : List[str] = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : Optional[int] = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Any = right_node
q.put(_lowerCamelCase )
raise
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
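        # descend left as far as possible, then visit the node and continue with its right subtree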
while n:
stack.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], []
lowerCamelCase__ : int = node
stacka.append(_lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A_ : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 316 | 0 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
lowercase : int = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
lowercase : int = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : List[Any] = ' Hello world! cécé herlolip'
lowercase : Optional[int] = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> Any:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : int) -> Tuple:
'''simple docstring'''
__UpperCamelCase : str = dct.pop(_lowerCamelCase)
__UpperCamelCase : Dict = val
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : Tuple = torch.load(_lowerCamelCase , map_location="cpu")
__UpperCamelCase : List[Any] = torch.hub.load("pytorch/fairseq" , "bart.large.cnn").eval()
hub_interface.model.load_state_dict(sd["model"])
return hub_interface
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple) -> Tuple:
'''simple docstring'''
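    # build a bias-free linear layer whose weights are tied to the embedding matrix (weight tying for the LM head)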
__UpperCamelCase , __UpperCamelCase : Tuple = emb.weight.shape
__UpperCamelCase : str = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
__UpperCamelCase : List[Any] = emb.weight.data
return lin_layer
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]=None) -> Union[str, Any]:
'''simple docstring'''
if not os.path.exists(_lowerCamelCase):
__UpperCamelCase : str = torch.hub.load("pytorch/fairseq" , _lowerCamelCase).eval()
else:
__UpperCamelCase : Dict = load_xsum_checkpoint(_lowerCamelCase)
bart.model.upgrade_state_dict(bart.model.state_dict())
if hf_checkpoint_name is None:
__UpperCamelCase : Dict = checkpoint_path.replace("." , "-")
__UpperCamelCase : Optional[int] = BartConfig.from_pretrained(_lowerCamelCase)
__UpperCamelCase : int = bart.encode(_lowerCamelCase).unsqueeze(0)
__UpperCamelCase : Union[str, Any] = BartTokenizer.from_pretrained(_lowerCamelCase).encode(_lowerCamelCase , return_tensors="pt").unsqueeze(0)
if not torch.eq(_lowerCamelCase , _lowerCamelCase).all():
raise ValueError(
F'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}')
if checkpoint_path == "bart.large.mnli":
__UpperCamelCase : List[str] = bart.state_dict()
remove_ignore_keys_(_lowerCamelCase)
__UpperCamelCase : Union[str, Any] = state_dict["model.decoder.embed_tokens.weight"]
for src, dest in mnli_rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : Optional[Any] = BartForSequenceClassification(_lowerCamelCase).eval()
model.load_state_dict(_lowerCamelCase)
__UpperCamelCase : Any = bart.predict("mnli" , _lowerCamelCase , return_logits=_lowerCamelCase)
__UpperCamelCase : Any = model(_lowerCamelCase)[0] # logits
else: # no classification heads to worry about
__UpperCamelCase : Any = bart.model.state_dict()
remove_ignore_keys_(_lowerCamelCase)
__UpperCamelCase : Any = state_dict["decoder.embed_tokens.weight"]
__UpperCamelCase : List[Any] = bart.extract_features(_lowerCamelCase)
if hf_checkpoint_name == "facebook/bart-large":
__UpperCamelCase : Optional[Any] = BartModel(_lowerCamelCase).eval()
model.load_state_dict(_lowerCamelCase)
__UpperCamelCase : Optional[int] = model(_lowerCamelCase).model[0]
else:
__UpperCamelCase : Dict = BartForConditionalGeneration(_lowerCamelCase).eval() # an existing summarization ckpt
model.model.load_state_dict(_lowerCamelCase)
if hasattr(_lowerCamelCase , "lm_head"):
__UpperCamelCase : Any = make_linear_from_emb(model.model.shared)
__UpperCamelCase : Union[str, Any] = model.model(_lowerCamelCase)[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}')
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
model.save_pretrained(_lowerCamelCase)
if __name__ == "__main__":
lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
lowercase : Dict = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config) | 232 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the (old_key, new_key) pairs mapping the original checkpoint to the BEiT layout."""
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'{prefix}blocks.{i}.norm1.weight', F'beit.encoder.layer.{i}.layernorm_before.weight'))
rename_keys.append((F'{prefix}blocks.{i}.norm1.bias', F'beit.encoder.layer.{i}.layernorm_before.bias'))
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.weight', F'beit.encoder.layer.{i}.attention.output.dense.weight'))
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.bias', F'beit.encoder.layer.{i}.attention.output.dense.bias'))
rename_keys.append((F'{prefix}blocks.{i}.norm2.weight', F'beit.encoder.layer.{i}.layernorm_after.weight'))
rename_keys.append((F'{prefix}blocks.{i}.norm2.bias', F'beit.encoder.layer.{i}.layernorm_after.bias'))
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.weight', F'beit.encoder.layer.{i}.intermediate.dense.weight'))
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.bias', F'beit.encoder.layer.{i}.intermediate.dense.bias'))
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.weight', F'beit.encoder.layer.{i}.output.dense.weight'))
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.bias', F'beit.encoder.layer.{i}.output.dense.bias'))
# projection layer + position embeddings
rename_keys.extend(
[
(F'{prefix}cls_token', "beit.embeddings.cls_token"),
(F'{prefix}patch_embed.proj.weight', "beit.embeddings.patch_embeddings.projection.weight"),
(F'{prefix}patch_embed.proj.bias', "beit.embeddings.patch_embeddings.projection.bias"),
(F'{prefix}pos_embed', "beit.embeddings.position_embeddings"),
])
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
])
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
])
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each fused qkv projection of the original checkpoint into separate q/k/v weights."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    """Pop `old` from the dict and reinsert its value under `new`."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Load the standard COCO test image used to verify the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the Hugging Face BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
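# --- Illustrative aside (added; not part of the original script) ---
# read_in_q_k_v above slices a fused (3*hidden, hidden) qkv matrix into query,
# key and value blocks. A tiny standalone demo of exactly that slicing:
def _demo_split_qkv(hidden_size: int = 4):
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = qkv[:hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)
    return query, key, value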
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 232 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation with a 1D UNet and a scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__( self : int , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 1_00 , lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase : Optional[float] = None , lowerCAmelCase : bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if audio_length_in_s is None:
__lowerCAmelCase : Dict = self.unet.config.sample_size / self.unet.config.sample_rate
__lowerCAmelCase : Any = audio_length_in_s * self.unet.config.sample_rate
__lowerCAmelCase : Dict = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
__lowerCAmelCase : Tuple = int(lowerCAmelCase )
if sample_size % down_scale_factor != 0:
__lowerCAmelCase : Optional[int] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
""" process.""" )
__lowerCAmelCase : List[Any] = int(lowerCAmelCase )
__lowerCAmelCase : Any = next(iter(self.unet.parameters() ) ).dtype
__lowerCAmelCase : Optional[Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__lowerCAmelCase : int = randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase , device=audio.device )
__lowerCAmelCase : int = self.scheduler.timesteps.to(lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__lowerCAmelCase : str = self.unet(lowerCAmelCase , lowerCAmelCase ).sample
# 2. compute previous image: x_t -> t_t-1
__lowerCAmelCase : str = self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
__lowerCAmelCase : Tuple = audio.clamp(-1 , 1 ).float().cpu().numpy()
__lowerCAmelCase : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCAmelCase )
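# --- Illustrative usage sketch (added; not part of the original file) ---
# Loaded through diffusers, the pipeline above is driven like this. The
# checkpoint id "harmonai/maestro-150k" is an assumption; substitute any
# compatible unconditional audio diffusion checkpoint:
#
#     from diffusers import DanceDiffusionPipeline
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audios = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0).audios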
| 139 |
__UpperCAmelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__UpperCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__UpperCAmelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 139 | 1 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
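# --- Illustrative aside (added; not part of the original tests) ---
# The standalone check above leans on Autoformer's series decomposition
# (series = seasonal + trend, with the trend estimated by a moving average).
# A minimal sketch of that idea for a 1D series, assuming a window of 3:
def _demo_series_decomposition(x, window=3):
    trend = torch.nn.functional.avg_pool1d(
        x.view(1, 1, -1), kernel_size=window, stride=1, padding=window // 2, count_include_pad=False
    ).view(-1)
    seasonal = x - trend
    return seasonal, trend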
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 47 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = sd_pipe([prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowerCamelCase = output.images
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=A , )[0]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=A )
assert isinstance(A , A )
assert isinstance(pipe.scheduler , A )
assert pipe.safety_checker is None
lowerCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
lowerCamelCase = StableDiffusionPipeline.from_pretrained(A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
lowerCamelCase = unet.half()
lowerCamelCase = vae.half()
lowerCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=A )
lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
lowerCamelCase = 40_03_66_03_46
lowerCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=A )
lowerCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
lowerCamelCase = 27_34_97_17_55
lowerCamelCase = 7
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
lowerCamelCase = 10_44_35_52_34
lowerCamelCase = 12
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=50 , output_type="""np""" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
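# --- Illustrative aside (added; not part of the original tests) ---
# Each assertion above compares a 3x3 corner slice of the generated image
# against pinned reference values. The same check in isolation:
def _check_image_slice(image, expected, atol=1e-2):
    image_slice = image[0, -3:, -3:, -1]
    return bool(np.abs(image_slice.flatten() - np.asarray(expected)).max() < atol)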
| 252 | 0 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX-Algorithm for the minimum vertex cover problem."""
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    queue: list[list] = []
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
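# --- Illustrative aside (added; not part of the original file) ---
# A quick sanity check that a returned set really covers every edge of the
# adjacency-list graph (every edge must have at least one chosen endpoint):
def _is_vertex_cover(graph: dict, cover: set) -> bool:
    return all(u in cover or v in cover for u in graph for v in graph[u])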
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 145 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    """Construct an MBART tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
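# --- Illustrative usage sketch (added; not part of the original file) ---
# Loaded with a pretrained vocab, the tokenizer appends [eos, src_lang_code]
# as suffix tokens, e.g.:
#
#     tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#     batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # batch["input_ids"] ends with [..., eos_id, lang_code_id("en_XX")]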
| 145 | 1 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n    except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n    except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(in_sentvecs, en_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__lowercase , __lowercase )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 61 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
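# --- Illustrative worked example (added; not part of the original file) ---
# Keeping (include, exclude) pairs, [2, 7, 9, 3, 1] evolves as
# (2, 0) -> (7, 2) -> (11, 7) -> (10, 11) -> (12, 11), so the answer is
# 12 = 2 + 9 + 1.
assert maximum_non_adjacent_sum([2, 7, 9, 3, 1]) == 12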
if __name__ == "__main__":
import doctest
doctest.testmod() | 152 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip'] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blip'] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
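# Minimal sketch of the lazy-import pattern used above (an illustrative
# assumption, much simpler than the real `_LazyModule`): attribute access
# resolves the owning submodule on demand and imports it only then.
#
#     import importlib, types
#
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, item):
#             module = importlib.import_module("." + self._class_to_module[item], self.__name__)
#             return getattr(module, item)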
| 28 |
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    '''simple docstring'''
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_sin() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_sin() requires a positive int for accuracy""")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    '''simple docstring'''
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_cos() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_cos() requires a positive int for accuracy""")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
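    # Hedged sanity check (illustrative addition, not in the original file):
    # after range reduction the 30-term series should match math.sin/math.cos
    # to double precision.
    from math import cos, sin

    assert abs(maclaurin_sin(1.0) - sin(1.0)) < 1e-12
    assert abs(maclaurin_cos(1.0) - cos(1.0)) < 1e-12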
| 28 | 1 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            """simple docstring"""
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            """simple docstring"""
            return list(s)

        def process_list(self, inp: List[str]):
            """simple docstring"""
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> cer = datasets.load_metric(\"cer\")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Value("""string""" , id="""sequence"""),
}) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        """simple docstring"""
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
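# Hedged reference implementation (illustrative addition, not part of the
# original metric): CER is the character-level Levenshtein distance divided by
# the reference length, which the jiwer-based computation above reproduces via
# substitution/deletion/insertion counts.
def _char_error_rate(reference: str, hypothesis: str) -> float:
    # single-row dynamic-programming edit distance over characters
    previous = list(range(len(hypothesis) + 1))
    for i, ref_char in enumerate(reference, start=1):
        current = [i]
        for j, hyp_char in enumerate(hypothesis, start=1):
            current.append(min(
                previous[j] + 1,  # deletion
                current[j - 1] + 1,  # insertion
                previous[j - 1] + (ref_char != hyp_char),  # substitution
            ))
        previous = current
    return previous[-1] / len(reference)


assert abs(_char_error_rate("abc", "abd") - 1 / 3) < 1e-9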
| 136 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } , )

    def to_dict(self):
        '''simple docstring'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
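# Minimal sketch of the serialization behavior above (illustrative assumption,
# with toy stand-ins rather than the real training arguments): nested objects
# that expose `to_dict` are flattened so the result is JSON-serializable.
if __name__ == "__main__":
    class _TinyGenerationConfig:
        num_beams = 4

        def to_dict(self):
            return {"num_beams": self.num_beams}

    class _TinyArgs:
        def __init__(self):
            self.generation_config = _TinyGenerationConfig()

        def to_dict(self):
            d = dict(self.__dict__)
            for k, v in d.items():
                if hasattr(v, "to_dict"):
                    d[k] = v.to_dict()
            return d

    assert _TinyArgs().to_dict() == {"generation_config": {"num_beams": 4}}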
| 316 | 0 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """simple docstring"""
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    F'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = F'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += F'{name} >> {text} \n'
        return output
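# Illustrative usage of the Conversation container above (an assumption for
# demonstration; in practice responses are produced by ConversationalPipeline):
#
#     conversation = Conversation("Hi there!")
#     conversation.mark_processed()
#     conversation.append_response("Hello! How can I help?")
#     conversation.add_user_input("What's the weather like?")
#     print(conversation)  # walks the user/bot turns via iter_texts()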
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"\n    min_length_for_response (`int`, *optional*, defaults to 32):\n        The minimum length (in number of tokens) for a response.\n    minimum_tokens (`int`, *optional*, defaults to 10):\n        The minimum length of tokens to leave for a response.\n    " , )
class ConversationalPipeline(Pipeline):
    """simple docstring"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['min_length_for_response'] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['minimum_tokens'] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params['max_length'] = generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs)
        if isinstance(outputs , list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation , Conversation):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs')
        if conversation.new_user_input is None:
            raise ValueError(
                F'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                'Add user inputs with the conversation\'s `add_user_input` method')
        if hasattr(self.tokenizer , '_build_conversation_input_ids'):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get('max_length' , self.model.config.max_length)
        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation')
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 227 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 227 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_labels=False ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=2 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=20 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
        input_ids = tf.concat([input_ids, eos_tensor] ,axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        inputs_dict = prepare_mbart_inputs_dict(config ,input_ids ,decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self ,config ,inputs_dict ):
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids ,attention_mask=attention_mask ,head_mask=head_mask ,use_cache=True )
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
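# Illustrative note (not from the original test): for input_ids [[5, 6, 1]] with
# pad_token_id=1 the helper above yields attention_mask [[1, 1, 0]], while the
# decoder mask always keeps position 0 (the decoder start token) unmasked.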
@require_tf
class TFMBartModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip( self ,pipeline_test_casse_name ,config_class ,model_architecture ,tokenizer_name ,processor_name ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp( self ):
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=MBartConfig )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest( unittest.TestCase ):
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    expected_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    model_name = '''facebook/mbart-large-en-ro'''
    @cached_property
    def tokenizer( self ):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model( self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    def _assert_generated_batch_equal_expected( self ,**tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text ,generated_words )

    def translate_src_text( self ,**tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text ,**tokenizer_kwargs ,return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids ,skip_special_tokens=True )
        return generated_words

    @slow
    def test_batch_generation_en_ro( self ):
        self._assert_generated_batch_equal_expected()
| 139 |
'''simple docstring'''
def solution( n = 100 ):
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
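# Quick check (illustrative, not in the original file): for 2 <= a, b <= 5 the
# only collision is 2**4 == 4**2 == 16, leaving 15 distinct terms.
assert solution(5) == 15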
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 139 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest( FlaxModelTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxRoFormerModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
| 63 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration ):
    '''simple docstring'''
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model( model , input , target , accelerator , do_backward=True ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
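# Note (illustrative addition): dividing the loss by gradient_accumulation_steps
# in the manual branch mirrors the scaling accelerator.backward() applies
# internally, keeping the hand-rolled and wrapped paths numerically comparable.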
def get_training_setup( accelerator , sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync( accelerator ):
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync( accelerator ):
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation( split_batches=False , dispatch_batches=False ):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False , dispatch_batches=False ):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
    GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset , batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset , batch_size=16 )
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
                test_gradient_accumulation(split_batch , dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )
def _mp_fn( index ):
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
| 63 | 1 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 145 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests( unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image( self ):
        """simple docstring"""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    @property
    def dummy_cond_unet_upscale( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model
    @property
    def dummy_vae( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model
    @property
    def dummy_text_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(config )
    def test_stable_diffusion_upscale( self ):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction" )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=350 , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_upscale_batch( self ):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction" )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=350 , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Any = self.dummy_cond_unet_upscale
_UpperCAmelCase : Any = DDPMScheduler()
_UpperCAmelCase : Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : List[Any] = self.dummy_text_encoder
_UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : Optional[int] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
_UpperCAmelCase : Tuple = unet.half()
_UpperCAmelCase : Dict = text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Dict = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ).images
_UpperCAmelCase : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline( self ):
        """simple docstring"""
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16( self ):
        """simple docstring"""
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0 )
        _ = pipe(
            prompt=prompt , image=image , generator=generator , num_inference_steps=5 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9 | 145 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
@slow
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : Tuple = """bert-base-cased"""
__UpperCAmelCase : Any = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : Optional[int] = TFAutoModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : List[Any] = """bert-base-cased"""
__UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = TFAutoModelForPreTraining.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : int = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : Optional[int] = TFAutoModelForCausalLM.from_pretrained(__UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase : Tuple = TFAutoModelForCausalLM.from_pretrained(__UpperCAmelCase , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def __A ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : str = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : Dict = TFAutoModelForMaskedLM.from_pretrained(__UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(__UpperCAmelCase , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
            __UpperCAmelCase : Optional[Any] = TFAutoModelForSeq2SeqLM.from_pretrained(__UpperCAmelCase )
            __UpperCAmelCase , __UpperCAmelCase : Any = TFAutoModelForSeq2SeqLM.from_pretrained(__UpperCAmelCase , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def __A ( self ) -> Dict:
'''simple docstring'''
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__UpperCAmelCase : Any = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : Tuple = TFAutoModelForSequenceClassification.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
def __A ( self ) -> Optional[int]:
'''simple docstring'''
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : Any = TFAutoModelForQuestionAnswering.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
@require_tensorflow_probability
def __A ( self ) -> int:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(__UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
__UpperCAmelCase , output_loading_info=__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def __A ( self ) -> str:
'''simple docstring'''
        __UpperCAmelCase : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=__UpperCAmelCase ) , 14_410 )
def __A ( self ) -> Dict:
'''simple docstring'''
        __UpperCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=__UpperCAmelCase ) , 14_410 )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
__UpperCAmelCase : Optional[int] = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : List[Any] = copy.deepcopy(model.config )
__UpperCAmelCase : Dict = ["""FunnelBaseModel"""]
__UpperCAmelCase : Tuple = TFAutoModel.from_config(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = TFAutoModel.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def __A ( self ) -> List[str]:
'''simple docstring'''
try:
AutoConfig.register("""new-model""" , __UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__UpperCAmelCase ):
auto_class.register(__UpperCAmelCase , __UpperCAmelCase )
auto_class.register(__UpperCAmelCase , __UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
auto_class.register(__UpperCAmelCase , __UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCAmelCase : Tuple = BertModelTester(self ).get_config()
__UpperCAmelCase : Optional[Any] = NewModelConfig(**tiny_config.to_dict() )
__UpperCAmelCase : str = auto_class.from_config(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__UpperCAmelCase )
__UpperCAmelCase : Dict = auto_class.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __A ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
__UpperCAmelCase : Union[str, Any] = TFAutoModel.from_pretrained("""bert-base""" )
def __A ( self ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            __UpperCAmelCase : List[str] = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="""aaaaaa""" )
def __A ( self ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
__UpperCAmelCase : Optional[int] = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def __A ( self ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(__UpperCAmelCase , """Use `from_pt=True` to load this model""" ):
__UpperCAmelCase : Optional[Any] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
# Make sure we have cached the model.
__UpperCAmelCase : Tuple = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
__UpperCAmelCase : Optional[Any] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__UpperCAmelCase : List[str] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
__UpperCAmelCase : List[str] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
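# Editor's note (added): the cache test above works because a repeated
# from_pretrained only issues HEAD requests to validate the local cache. A hedged
# one-liner forcing fully offline reuse (local_files_only is a real transformers
# flag; the checkpoint is just the tiny model used above):
#
#     model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert", local_files_only=True)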
| 16 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        '''simple docstring'''
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self ) -> str:
'''simple docstring'''
return str(self )
    def validate_indices(self, loc: tuple[int, int]) -> bool:
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc: tuple[int, int]) -> Any:
        '''simple docstring'''
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        '''simple docstring'''
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        '''simple docstring'''
        return self + (-another)

    def __mul__(self, another) -> Matrix:
        '''simple docstring'''
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        '''simple docstring'''
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
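# Editor's note (added): sherman_morrison above assumes ``self`` already holds
# A^(-1) and applies the Sherman-Morrison identity
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# A quick cross-check against numpy (an extra dependency of this sketch only):
#
#     import numpy as np
#     a_inv = np.eye(3)
#     u = np.array([[1.0], [2.0], [-3.0]])
#     v = np.array([[4.0], [-2.0], [5.0]])
#     direct = np.linalg.inv(np.linalg.inv(a_inv) + u @ v.T)
#     formula = a_inv - (a_inv @ u @ v.T @ a_inv) / (1.0 + float(v.T @ a_inv @ u))
#     assert np.allclose(direct, formula)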
# Testing
if __name__ == "__main__":
    def test1() -> None:
        """simple docstring"""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """simple docstring"""
        import doctest

        doctest.testmod()

    test1()
| 16 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=58101 , decoder_vocab_size=None , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=58100 , scale_embedding=False , pad_token_id=58100 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
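# Editor's usage sketch (added): attribute_map above aliases generic config names
# onto the Marian-specific ones, so framework code can keep using hidden_size.
# Illustrative values only:
#
#     config = MarianConfig(encoder_layers=2, decoder_layers=2, d_model=64)
#     assert config.hidden_size == config.d_model                        # via attribute_map
#     assert config.num_attention_heads == config.encoder_attention_heads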
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f"past_key_values.{i}.key"] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f"past_key_values.{i}.value"] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f"present.{i}.key"] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f"present.{i}.value"] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        """simple docstring"""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch , encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch , decoder_past_length )] , dim=1 )
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        """simple docstring"""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch , seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        """simple docstring"""
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
@property
    def atol_for_validation(self ) -> float:
        """simple docstring"""
        return 1e-4
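# Editor's usage sketch (added): how the dummy-input helpers above are typically
# driven during ONNX export. Names follow the transformers ONNX API; the checkpoint
# choice is an assumption:
#
#     from transformers import MarianTokenizer
#     tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     onnx_config = MarianOnnxConfig(model_config, task="seq2seq-lm")
#     dummy_inputs = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#     )
#     # dummy_inputs now holds input_ids, attention_mask, decoder_input_ids, decoder_attention_mask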
| 28 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """simple docstring"""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize('hand, expected' , TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    """simple docstring"""
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize('hand, expected' , TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize('hand, expected' , TEST_TYPES)
def test_hand_values(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    """simple docstring"""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize('hand, other, expected' , generate_random_hands())
def test_compare_random(hand, other, expected):
    """simple docstring"""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    """simple docstring"""
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    """simple docstring"""
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand('2D AC 3H 4H 5S'), PokerHand('2S 3H 4H 5S 6C')]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    """simple docstring"""
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('2C 4S AS 3D 5C')
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    """simple docstring"""
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, 'poker_hands.txt')
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
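# Editor's usage sketch (added): the comparison API exercised above, in miniature;
# the hands and expected result come from the first row of TEST_COMPARE:
#
#     hand = PokerHand("2H 3H 4H 5H 6H")   # six-high straight flush
#     other = PokerHand("KS AS TS QS JS")  # royal flush
#     assert hand.compare_with(other) == "Loss"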
| 28 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = 'vivit'

    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ):
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
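# Editor's worked note (added): with the defaults above (image_size=224,
# num_frames=32, tubelet_size=[2, 16, 16]) a clip is cut into
# (32 // 2) * (224 // 16) * (224 // 16) = 16 * 14 * 14 = 3136 tubelet tokens,
# each projected to hidden_size=768 before the transformer layers.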
| 370 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("tiny" ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small" ):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base" ):
        pass
    elif deit_name[4:].startswith("large" ):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size )
    encoding = image_processor(images=prepare_img(), return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
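# Editor's usage sketch (added): invoking the conversion from a shell. The script
# filename is an assumption; the flags match the argparse definition above.
#
#     python convert_deit_timm_to_pytorch.py \
#         --deit_name vit_deit_base_distilled_patch16_224 \
#         --pytorch_dump_folder_path ./deit-base-distilled-patch16-224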
| 296 | 0 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """simple docstring"""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """simple docstring"""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """simple docstring"""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """simple docstring"""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
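# Editor's sketch (added): a quick validity check for a completed grid, reusing
# is_safe from above. The helper name is illustrative, not part of the original.
def is_complete_and_valid(grid: Matrix) -> bool:
    for row in range(9):
        for column in range(9):
            digit = grid[row][column]
            if digit == 0:
                return False
            grid[row][column] = 0  # blank the cell so is_safe ignores the digit itself
            safe = is_safe(grid, row, column, digit)
            grid[row][column] = digit
            if not safe:
                return False
    return True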
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
_lowercase: List[str] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 227 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        """simple docstring"""
        token = "[PAD]"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "[PAD]" )
        self.assertEqual(vocab_keys[1] , "[CLS]" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1012 )
    def test_vocab_size(self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
    def big_tokenizer(self ):
        """simple docstring"""
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
    def test_tokenization_base_easy_symbols(self ):
        """simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration(self ):
        """simple docstring"""
a = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
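# Editor's note (added): SPIECE_UNDERLINE ("▁") is SentencePiece's word-boundary
# marker, which is why the expected tokens above carry a leading "▁"; pieces missing
# from the tiny fixture vocab, such as "9" and "é", round-trip to "[UNK]" in the
# id-to-token check.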
| 227 | 1 |
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
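# Editor's worked note (added): bisection halves the bracket every step, so the
# error after k iterations is (b - a) / 2**k. Hitting the 10**-7 tolerance above
# from [1, 1000] takes about ceil(log2(999 / 1e-7)) ~= 34 iterations; for
# f(x) = x**3 - 2*x - 5 the returned root is ~2.0945515.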
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
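# Editor's sketch (added): the _LazyModule pattern above in miniature: attribute
# access triggers the real import, which keeps `import transformers` cheap. This is
# a stand-alone illustration, not the transformers implementation.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so the import only happens once
        return value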
| 294 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
lowerCAmelCase_ : str = {'facebook/blenderbot_small-90M': 5_12}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
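# Example: get_pairs(("h", "e", "l", "l", "o</w>")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}.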
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
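# A minimal usage sketch (fetches the vocabulary from the Hub on first call):
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tokenizer("sample text")["input_ids"]
#     tokenizer.convert_ids_to_tokens(ids)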
| 63 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                metric_val = metrics[key]
                if isinstance(metric_val, torch.Tensor):
                    metric_val = metric_val.item()
                msg = f"{key}: {metric_val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 63 | 1 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
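# Worked example: for a 480x640 (height x width) input and a 384x384 target with
# keep_aspect_ratio=True and multiple=32, scale_height = 0.8 deviates less from 1
# than scale_width = 0.6, so both dimensions are scaled by 0.8 and then snapped
# to multiples of 32, yielding (new_height, new_width) = (384, 512).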
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
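# A minimal usage sketch (the image path is illustrative):
#
#     from PIL import Image
#
#     image_processor = DPTImageProcessor(
#         size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32
#     )
#     inputs = image_processor(images=Image.open("example.jpg"), return_tensors="pt")
#     inputs["pixel_values"].shape  # (1, 3, H, W) with H and W multiples of 32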
| 366 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
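if __name__ == "__main__":
    # Quick demonstration: coefficients run from the constant term upward, so
    # [1, 2, 3] encodes 3x^2 + 2x + 1.
    p = Polynomial(2, [1, 2, 3])
    print(p)  # 3x^2 + 2x + 1
    print(p.evaluate(2))  # 17
    print(p.derivative())  # 6x + 2
    print(p.integral(0))  # 1.0x^3 + 1.0x^2 + 1.0x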
| 253 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[Any] = '''bert-base-cased'''
lowercase__ : Optional[int] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
lowercase__ : str = '''bert-base-cased'''
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForPreTraining.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(_snake_case )
lowercase__ , lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,output_loading_info=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : int = TFAutoModelWithLMHead.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : int = TFAutoModelForMaskedLM.from_pretrained(_snake_case )
lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,output_loading_info=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,output_loading_info=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : List[str] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Dict = TFAutoModelForSequenceClassification.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : List[Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
@require_tensorflow_probability
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowercase__ : str = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(_snake_case )
lowercase__ , lowercase__ : Union[str, Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
_snake_case ,output_loading_info=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
lowercase__ : str = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Tuple = copy.deepcopy(model.config )
lowercase__ : Dict = ['''FunnelBaseModel''']
lowercase__ : List[Any] = TFAutoModel.from_config(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_snake_case )
lowercase__ : Optional[Any] = TFAutoModel.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
try:
AutoConfig.register('''new-model''' ,_snake_case )
lowercase__ : int = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_snake_case ):
auto_class.register(_snake_case ,_snake_case )
auto_class.register(_snake_case ,_snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_snake_case ):
auto_class.register(_snake_case ,_snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase__ : List[Any] = BertModelTester(self ).get_config()
lowercase__ : Optional[Any] = NewModelConfig(**tiny_config.to_dict() )
lowercase__ : Union[str, Any] = auto_class.from_config(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_snake_case )
lowercase__ : Dict = auto_class.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
_snake_case ,'''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' ,):
lowercase__ : Tuple = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(_snake_case ,'''Use `from_pt=True` to load this model''' ):
lowercase__ : str = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def UpperCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
lowercase__ : str = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowercase__ : Any = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
# With a sharded checkpoint
lowercase__ : Dict = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
lowercase__ : Dict = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
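# Example invocation (script name and file locations are illustrative):
#     python convert_cvt_checkpoint.py \
#         --cvt_model cvt-w24 \
#         --image_size 384 \
#         --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#         --pytorch_dump_folder_path ./cvt-w24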
| 16 | 1 |
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
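# Sanity check: for the straight line f(x) = x on [0, 1] every chord lies on the
# curve itself, so even a single step recovers the exact length:
#     line_length(lambda x: x, 0, 1, steps=1)  ->  sqrt(2) = 1.41421...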
if __name__ == "__main__":
def _UpperCAmelCase (UpperCamelCase_ : Optional[Any] ):
'''simple docstring'''
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
_lowerCamelCase : Any = 1_0
while i <= 1_0_0_0_0_0:
print(F'''With {i} steps: {line_length(f, -1_0, 1_0, i)}''')
i *= 1_0
| 353 |
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
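# Worked example: match_pattern("aab", "c*a*b") is True. "c*" matches the empty
# string (dp[0][2] inherits dp[0][0]), "a*" absorbs both 'a' characters, and the
# final 'b' matches literally, so dp[3][5] ends up 1.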
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_lowerCamelCase : Any = "aab"
_lowerCamelCase : List[str] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
| 159 | 0 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Filter out members that would escape output_path (fix for CVE-2007-4559).
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    """simple docstring"""

    extractors = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, BaseExtractor):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
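# A minimal usage sketch of the registry above (not part of the original
# module; the archive path is hypothetical):
#
#     archive = "downloads/archive.tar.gz"
#     fmt = Extractor.infer_extractor_format(archive)
#     if fmt:
#         Extractor.extract(archive, "downloads/extracted", extractor_format=fmt)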
| 35 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    '''simple docstring'''

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
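# A minimal usage sketch (not in the original file): these arguments are
# normally filled from the CLI via transformers' HfArgumentParser.
#
#     from transformers import HfArgumentParser
#
#     parser = HfArgumentParser(BenchmarkArguments)
#     benchmark_args = parser.parse_args_into_dataclasses()[0]
#     print(benchmark_args.to_json_string())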
| 296 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
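# What the lazy pattern above buys (a sketch of _LazyModule's assumed behavior):
# submodules are only imported on first attribute access, e.g.
#
#     from transformers.models.efficientnet import EfficientNetConfig
#
# triggers the import of configuration_efficientnet at that point, keeping the
# top-level import cheap when the torch or vision extras are missing.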
| 302 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( BaseImageProcessor ):
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__( self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, crop_size: Dict[str, int] = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale( self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
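# A minimal usage sketch (not in the original file); the image path is
# hypothetical and the processor is constructed with its defaults:
#
#     from PIL import Image
#
#     processor = __A()
#     batch = processor(Image.open("example.jpg"), return_tensors="np")
#     batch["pixel_values"].shape  # e.g. (1, 3, 224, 224)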
| 302 | 1 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of ``input_list`` in sorted order; [] for an empty list."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(counts) if value == max_count})
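# Worked examples for mode() above (a quick sanity sketch):
#   mode([1, 2, 2, 3]) -> [2]
#   mode([1, 1, 2, 2]) -> [1, 2]  # ties return all modes, sorted ascending
#   mode([]) -> []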
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 294 | 0 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos for equality while ignoring their names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""
    res = a == b  # field-wise comparison of the two protos

    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 1_1:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
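# A minimal usage sketch (not part of the original script; the path below is
# hypothetical):
#
#     optimized_path = remove_dup_initializers("exported/model.onnx")
#     print("wrote", optimized_path)  # exported/optimized_model.onnx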
| 357 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=1_9_2, depths=(2, 2, 1_8, 2), num_heads=(6, 1_2, 2_4, 4_8), window_size=1_2, out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=9_0_0, encoder_ffn_dim=2_0_4_8, decoder_ffn_dim=2_0_4_8, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 3_6_6
        filename = "object365-id2label.json"
    else:
        num_labels = 9_1
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(F"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(F"jozhang97/{model_name}")
        processor.push_to_hub(F"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
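# A hedged CLI sketch (the script filename and paths are hypothetical):
#
#     python convert_deta_swin_to_pytorch.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large \
#         --push_to_hub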
| 206 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
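# A minimal usage sketch (not in the original file) of the derived attributes
# computed in __init__ above:
#
#     config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
#     config.hidden_size  # 96 * 2**3 == 768
#     config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']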
| 253 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
'''simple docstring'''
    def __init__( self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
    def test_hidden_states_output(self):
        pass

    @unittest.skip('TimmBackbone initialization is managed on the timm side' )
    def test_initialization(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def test_inputs_embeds(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def test_model_common_attributes(self):
        pass

    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_save_load(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def test_tie_model_weights(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
    def test_channels(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
    def test_attention_outputs(self):
        pass

    @unittest.skip('Safetensors is not supported by timm.' )
    def test_can_use_safetensors(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 352 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    """simple docstring"""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
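# Worked examples for get_new_h_w above (with the default scale_factor=8, pixel
# sizes are mapped to latent sizes on a 64-pixel grid, rounded up):
#
#     get_new_h_w(768, 768)  # -> (96, 96):   768 // 64 = 12, then 12 * 8 = 96
#     get_new_h_w(770, 770)  # -> (104, 104): 770 // 64 = 12 r 2, so 13 * 8 = 104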
class KandinskyPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__( self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel ) -> None:
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
    batch_size = len(prompt) if isinstance(prompt, list) else 1
    # get prompt text embeddings
    text_inputs = self.tokenizer(
        prompt, padding='max_length', truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors='pt',
    )
    text_input_ids = text_inputs.input_ids
    untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids

    if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
        removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
        logger.warning(
            'The following part of your input was truncated because CLIP can only handle sequences up to'
            f' {self.tokenizer.model_max_length} tokens: {removed_text}')

    text_input_ids = text_input_ids.to(device)
    text_mask = text_inputs.attention_mask.to(device)

    prompt_embeds, text_encoder_hidden_states = self.text_encoder(
        input_ids=text_input_ids, attention_mask=text_mask)

    prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
    text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
    text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

    if do_classifier_free_guidance:
        uncond_tokens: List[str]
        if negative_prompt is None:
            uncond_tokens = [''] * batch_size
        elif type(prompt) is not type(negative_prompt):
            raise TypeError(
                f'`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !='
                f' {type(prompt)}.')
        elif isinstance(negative_prompt, str):
            uncond_tokens = [negative_prompt]
        elif batch_size != len(negative_prompt):
            raise ValueError(
                f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'
                f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                ' the batch size of `prompt`.')
        else:
            uncond_tokens = negative_prompt

        uncond_input = self.tokenizer(
            uncond_tokens, padding='max_length', max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt',
        )
        uncond_text_input_ids = uncond_input.input_ids.to(device)
        uncond_text_mask = uncond_input.attention_mask.to(device)

        negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
            input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask)

        # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
        seq_len = negative_prompt_embeds.shape[1]
        negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
        negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

        seq_len = uncond_text_encoder_hidden_states.shape[1]
        uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
        uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
            batch_size * num_images_per_prompt, seq_len, -1)
        uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        # done duplicates

        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
        text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
        text_mask = torch.cat([uncond_text_mask, text_mask])

    return prompt_embeds, text_encoder_hidden_states, text_mask
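    # Note on the return layout: when classifier-free guidance is active, the
    # unconditional rows are concatenated *before* the conditional ones, which
    # is what lets __call__ split a prediction back into its two halves with a
    # single .chunk(2) along the batch dimension.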
def enable_sequential_cpu_offload(self, gpu_id=0):
    if is_accelerate_available():
        from accelerate import cpu_offload
    else:
        raise ImportError('Please install accelerate via `pip install accelerate`')

    device = torch.device(f'cuda:{gpu_id}')

    models = [
        self.unet,
        self.text_encoder,
        self.movq,
    ]
    for cpu_offloaded_model in models:
        if cpu_offloaded_model is not None:
            cpu_offload(cpu_offloaded_model, device)
def enable_model_cpu_offload(self, gpu_id=0):
    if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
        from accelerate import cpu_offload_with_hook
    else:
        raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')

    device = torch.device(f'cuda:{gpu_id}')

    if self.device.type != "cpu":
        self.to('cpu', silence_dtype_warnings=False)
        torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

    hook = None
    for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
        _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

    if self.safety_checker is not None:
        _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

    # We'll offload the last model manually.
    self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device(self):
    if not hasattr(self.unet, '_hf_hook'):
        return self.device
    for module in self.unet.modules():
        if (
            hasattr(module, '_hf_hook')
            and hasattr(module._hf_hook, 'execution_device')
            and module._hf_hook.execution_device is not None
        ):
            return torch.device(module._hf_hook.execution_device)
    return self.device
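    # (The property above inspects accelerate's offload hooks: once a module
    # has been dispatched with cpu_offload_with_hook, its _hf_hook records the
    # device that actually runs the forward pass.)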
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(self, prompt: Union[str, List[str]], image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
    if isinstance(prompt, str):
        batch_size = 1
    elif isinstance(prompt, list):
        batch_size = len(prompt)
    else:
        raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')

    device = self._execution_device

    batch_size = batch_size * num_images_per_prompt
    do_classifier_free_guidance = guidance_scale > 1.0

    prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
        prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)

    if isinstance(image_embeds, list):
        image_embeds = torch.cat(image_embeds, dim=0)
    if isinstance(negative_image_embeds, list):
        negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

    if do_classifier_free_guidance:
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
            dtype=prompt_embeds.dtype, device=device)

    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps_tensor = self.scheduler.timesteps

    num_channels_latents = self.unet.config.in_channels

    height, width = get_new_h_w(height, width, self.movq_scale_factor)

    # create initial latent
    latents = self.prepare_latents(
        (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler,
    )

    for i, t in enumerate(self.progress_bar(timesteps_tensor)):
        # expand the latents if we are doing classifier free guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

        added_cond_kwargs = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
        noise_pred = self.unet(
            sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False,
        )[0]

        if do_classifier_free_guidance:
            noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            _, variance_pred_text = variance_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

        if not (
            hasattr(self.scheduler.config, 'variance_type')
            and self.scheduler.config.variance_type in ["learned", "learned_range"]
        ):
            noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

        # compute the previous noisy sample x_t -> x_t-1
        latents = self.scheduler.step(
            noise_pred, t, latents, generator=generator,
        ).prev_sample

    # post-processing
    image = self.movq.decode(latents, force_not_quantize=True)['sample']

    if output_type not in ["pt", "np", "pil"]:
        raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}')

    if output_type in ["np", "pil"]:
        image = image * 0.5 + 0.5
        image = image.clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

    if output_type == "pil":
        image = self.numpy_to_pil(image)

    if not return_dict:
        return (image,)

    return ImagePipelineOutput(images=image) | 275 | 0 |
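# A minimal numeric sketch (standalone; the tensors and scale are made up for
# illustration) of the classifier-free guidance update applied inside the
# denoising loop above: guided = uncond + scale * (text - uncond).
import torch

uncond = torch.zeros(1, 4, 8, 8)
text = torch.ones(1, 4, 8, 8)
guidance_scale = 4.0
guided = uncond + guidance_scale * (text - uncond)
assert torch.equal(guided, torch.full((1, 4, 8, 8), 4.0))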
def nand_gate(input_1: int, input_2: int) -> int:
    """Output 0 only when both inputs are 1, otherwise 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
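# NAND is functionally complete; as a small illustration (not part of the
# original file), the other basic gates can be derived from nand_gate alone:
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))


def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(not_gate(input_1), not_gate(input_2))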
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 0 |
import os
def solution():
    """Find the maximum total from top to bottom of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
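# A tiny worked example of the same top-down accumulation on an in-memory
# triangle (hypothetical data, independent of triangle.txt): the best path
# through [3], [7, 4], [2, 4, 6] is 3 + 7 + 4 = 14.
def max_path_sum(triangle: list) -> int:
    a = [row[:] for row in triangle]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


assert max_path_sum([[3], [7, 4], [2, 4, 6]]) == 14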
if __name__ == "__main__":
print(solution())
| 159 | 0 |
from __future__ import annotations
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self : List[str] ,__lowerCamelCase : int ):
'''simple docstring'''
a = data
a = None
a = None
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[Any]: # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> str:
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ), depth_of_tree(tree.right ) ) if tree else 0
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Tuple:
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def SCREAMING_SNAKE_CASE__ ( ) -> Dict: # Main function for testing.
"""simple docstring"""
a = Node(1 )
a = Node(2 )
a = Node(3 )
a = Node(4 )
a = Node(5 )
a = Node(6 )
a = Node(7 )
a = Node(8 )
a = Node(9 )
print(is_full_binary_tree(snake_case_ ) )
print(depth_of_tree(snake_case_ ) )
print('''Tree is: ''' )
display(snake_case_ )
if __name__ == "__main__":
main()
| 358 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = 'vit_mae'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
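    # Usage note (an illustration, not from the original file): with these
    # defaults a 224x224 image yields (224 // 16) ** 2 == 196 patches, and
    # mask_ratio=0.75 leaves the MAE encoder only 49 visible patches, e.g.
    #     cfg = ViTMAEConfig()
    #     visible = int((cfg.image_size // cfg.patch_size) ** 2 * (1 - cfg.mask_ratio))  # 49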
| 330 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {}
class LlamaConfig(PretrainedConfig):
    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 302 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm, used to check the key determinant is coprime with 36."""
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)
    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
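    # A hedged round-trip sketch for the class above, using the classic 2x2 key
    # [[2, 5], [1, 6]] (determinant 7, which is coprime with 36, so
    # check_determinant accepts it). process_text pads with the last character,
    # hence startswith instead of equality:
    #     hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    #     assert hc.decrypt(hc.encrypt("HELLO")).startswith("HELLO")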
def main() -> None:
    n = int(input("""Enter the order of the encryption key: """))
    hill_matrix = []

    print("""Enter each row of the encryption key with space separated integers""")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("""Would you like to encrypt or decrypt some text? (1 or 2)""")
    option = input("""\n1. Encrypt\n2. Decrypt\n""")
    if option == "1":
        text_e = input("""What text would you like to encrypt?: """)
        print("""Your encrypted text is:""")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("""What text would you like to decrypt?: """)
        print("""Your decrypted text is:""")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 302 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version('pytorch_lightning>=1.0.4')

MODEL_MODES = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule):
"""simple docstring"""
    def __init__(self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({'num_labels': num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"""model config doesn't have a `{p}` attribute"""
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool('.ckpt' in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)
    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check that only decay-eligible named parameters land in this group
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)
    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
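    # Worked example for the method above (hypothetical numbers): 10_000
    # samples with train_batch_size=32, accumulation 4 on 2 GPUs gives an
    # effective batch of 256, so total_steps() reports
    # (10_000 / 256) * max_epochs optimizer steps, i.e. ~39 per epoch.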
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError('You must implement this for your task')

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir, 'cached_{}_{}_{}'.format(
                mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length), ), )
@pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath('best_tfmr')
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
@staticmethod
    def add_model_specific_args(parser, root_dir):
'''simple docstring'''
parser.add_argument(
'''--model_name_or_path''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=lowerCAmelCase__ , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(lowerCAmelCase__ ).parent / '''test_run''' / '''cache''' ) , type=lowerCAmelCase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=lowerCAmelCase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=lowerCAmelCase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=lowerCAmelCase__ , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=lowerCAmelCase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5e-5 , type=lowerCAmelCase__ , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=lowerCAmelCase__ , metavar=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=lowerCAmelCase__ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=lowerCAmelCase__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=lowerCAmelCase__ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=lowerCAmelCase__ , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowerCAmelCase__ )
parser.add_argument('''--train_batch_size''' , default=32 , type=lowerCAmelCase__ )
parser.add_argument('''--eval_batch_size''' , default=32 , type=lowerCAmelCase__ )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY; in new pytorch-lightning, accelerators are removed
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info('***** Validation results *****')
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('{} = {}\n'.format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info('***** Test results *****')
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, 'test_results.txt')
        with open(output_test_results_file, 'w') as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('{} = {}\n'.format(key, str(metrics[key])))
                    writer.write('{} = {}\n'.format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        '--output_dir', default=str(Path(__file__).parent / 'test_run' / 'model_checkpoints'), type=str, help='The output directory where the model predictions and checkpoints will be written.', )
    parser.add_argument(
        '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', )
    parser.add_argument(
        '--fp16_opt_level', type=str, default='O2', help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ), )
    parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
    parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_predict', action='store_true', help='Whether to run predictions on the test set.')
    parser.add_argument(
        '--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', )
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument(
        '--data_dir', default=str(Path(__file__).parent / 'test_run' / 'dummy-train-data'), type=str, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.', )
def generic_train(model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix='checkpoint', monitor='val_loss', mode='min', save_top_k=1)
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )

    if args.do_train:
        trainer.fit(model)

    else:
        print('RAG modeling tests with new set functions successfully executed!')
    return trainer
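# Hedged end-to-end sketch of how the helpers above are meant to be wired
# together (MyTaskModule is a hypothetical BaseTransformer subclass that
# implements get_dataloader and an add_model_specific_args staticmethod):
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     parser = MyTaskModule.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     model = MyTaskModule(args)
#     trainer = generic_train(model, args)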
| 363 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def snake_case__ ( self : Dict ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def snake_case__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCAmelCase__ ) , 1004 )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def snake_case__ ( self : int ) -> Tuple:
'''simple docstring'''
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.tokenize(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_UpperCamelCase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=lowerCAmelCase__ , )
| 287 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'The model {self.model_type} is one of the few models that has no sequence length limit.')
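    # Reading of the adaptive-softmax defaults above (an illustration, not from
    # the original file): cutoffs [20000, 40000, 200000] split the
    # 267735-token vocabulary into a frequent head cluster plus three tail
    # clusters, and div_val=4 shrinks the embedding width by 4x for each
    # successive tail cluster.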
| 123 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')
        # No specific FOR_XXX available yet
    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("""http://""") or audio.startswith("""https://"""):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, """rb""") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("""We expect a numpy ndarray as input""")
        if len(audio.shape) != 1:
            raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="""pt""")
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("""candidate_labels""")
        text_inputs = model_inputs.pop("""text_inputs""")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("""candidate_labels""")
        logits = model_outputs["""logits"""][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("""`tf` framework not supported.""")

        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result | 206 | 0 |
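# Hedged usage sketch for the pipeline above (the task name and checkpoint
# follow the upstream registration of this pipeline; treat both as
# assumptions here):
#     from transformers import pipeline
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])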
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
lowercase_ = [0, 1, 2, 3, 4, 5, 6, 7]
lowercase_ = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 355 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["YolosFeatureExtractor"]
lowercase_ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
        """google/bigbird-roberta-large""": (
            """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
        ),
        """google/bigbird-base-trivia-itc""": (
            """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/bigbird-roberta-base""": 4_096,
    """google/bigbird-roberta-large""": 4_096,
    """google/bigbird-base-trivia-itc""": 4_096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
def __init__( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : str="<unk>" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : int="</s>" , __UpperCamelCase : List[str]="<pad>" , __UpperCamelCase : Tuple="[SEP]" , __UpperCamelCase : List[str]="[MASK]" , __UpperCamelCase : Any="[CLS]" , __UpperCamelCase : Union[str, Any] = None , **__UpperCamelCase : Optional[int] , ) -> None:
_UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
_UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
_UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
_UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
_UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
_UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sep_token=A_ , mask_token=A_ , cls_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
def __getstate__( self : Any ) -> str:
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : int , __UpperCamelCase : str ) -> Optional[Any]:
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : str , __UpperCamelCase : int ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : List[Any] ) -> str:
return self.sp_model.piece_to_id(A_ )
def _UpperCamelCase ( self : Any , __UpperCamelCase : Optional[int] ) -> str:
_UpperCamelCase = self.sp_model.IdToPiece(A_ )
return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any = False , __UpperCamelCase : int = None , __UpperCamelCase : List[Any] = True , **__UpperCamelCase : Union[str, Any] , ) -> str:
_UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , A_ )
_UpperCamelCase = self.convert_ids_to_tokens(A_ , skip_special_tokens=A_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_UpperCamelCase = []
_UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A_ ) )
_UpperCamelCase = []
sub_texts.append(A_ )
else:
current_sub_text.append(A_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A_ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
_UpperCamelCase = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(A_ ) )
else:
_UpperCamelCase = ''''''.join(A_ )
_UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_UpperCamelCase = self.clean_up_tokenization(A_ )
return clean_text
else:
return text
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : int = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def _UpperCamelCase ( self : str , __UpperCamelCase : int , __UpperCamelCase : Optional[int] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
_UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any = None , __UpperCamelCase : Any = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] = None ) -> List[int]:
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
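    # Hedged usage sketch (assumes the class above is the upstream
    # BigBirdTokenizer and that a local sentencepiece model file exists; the
    # path below is made up):
    #     tok = BigBirdTokenizer("spiece.model")
    #     ids = tok("Paris is the capital of France.")["input_ids"]
    #     assert tok.decode(ids, skip_special_tokens=True).strip().startswith("Paris")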
| 256 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
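
# Minimal sketch (an addition, not from the original file) of the table lookup used
# above, assuming only plain torch: indexing a per-restype table with a per-residue
# aatype vector broadcasts the table rows out to one row per residue.
#
#   import torch
#   table = torch.tensor([[0, 1, 2], [3, 4, 5]])   # 2 restypes, 3 atom slots each
#   aatype = torch.tensor([1, 0, 1])               # 3 residues
#   table[aatype]  # -> tensor([[3, 4, 5], [0, 1, 2], [3, 4, 5]]), shape (num_res, 3)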
| 275 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
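
# Hypothetical usage sketch for the config above: a plain ViT-style DPT config versus
# the hybrid variant, which injects the default 3-stage BiT backbone.
#
#   config = DPTConfig()
#   hybrid_config = DPTConfig(is_hybrid=True)
#   assert hybrid_config.backbone_config is not None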
| 322 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder"
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
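
# Hypothetical usage sketch for the classes above (the checkpoint name comes from the
# docstring constants earlier in this file):
#
#   import tensorflow as tf
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW, as the embedder expects
#   logits = model(pixel_values).logits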
| 322 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
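
# How the lazy module behaves at import time (a sketch, not part of the original
# file): none of the submodules above is imported until an attribute is touched.
#
#   from transformers import RoFormerModel        # resolves through _LazyModule
#   model = RoFormerModel.from_pretrained("junnyu/roformer_chinese_base")  # illustrative checkpoint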
| 22 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
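
# Example invocation (assuming this script is saved as convert_pegasus_tf_to_pytorch.py;
# the checkpoint path follows the default used in get_tf_weights_as_numpy above):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc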
| 330 | 0 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Counts the fillings of a row of `length` units with single-colour oblong
    tiles of length 2, 3 or 4 (cf. Project Euler problem 116)."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
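
# Cross-check sketch (an addition, not part of the original file): a per-colour DP
# for the same count. For each tile length t, f[i] = f[i-1] + f[i-t] counts rows of
# i units built from grey unit squares plus length-t tiles; dropping the all-grey
# row and summing over t in {2, 3, 4} matches the Project Euler 116 counting
# (e.g. 12 ways for a row of 5 units) and should agree with solution(n).
def brute_force_check(n: int) -> int:
    total = 0
    for tile in (2, 3, 4):
        f = [0] * (n + 1)
        for i in range(n + 1):
            f[i] = 1 if i < tile else f[i - 1] + f[i - tile]
        total += f[n] - 1  # drop the tiling that uses no coloured tile
    return total


# assert brute_force_check(50) == solution(50)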
| 234 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 234 | 1 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
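
# Hypothetical usage sketch: wiring the pipeline above to a pair-classification
# model. The checkpoint name is illustrative only.
#
#   from transformers import AutoModelForSequenceClassification, AutoTokenizer
#   model = AutoModelForSequenceClassification.from_pretrained("sgugger/finetuned-bert-mrpc")
#   tokenizer = AutoTokenizer.from_pretrained("sgugger/finetuned-bert-mrpc")
#   classifier = PairClassificationPipeline(model=model, tokenizer=tokenizer)
#   print(classifier("I like you.", second_text="I love you."))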
| 72 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
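
# Hypothetical usage sketch for the processor above:
#
#   import numpy as np
#   processor = ConvNextImageProcessor()
#   image = np.random.randint(0, 256, (512, 640, 3), dtype=np.uint8)
#   batch = processor(images=image, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 384, 384) with the default size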
| 287 | 0 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
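
# Illustrative usage (the checkpoint name comes from the pretrained map above):
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは")["input_ids"]
#   tokenizer.decode(ids)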
| 355 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 264 | 0 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """simple docstring"""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
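# Hedged usage sketch for the helper above (the names `new_call` / `old_arg`
# are illustrative, not from this file). A caller drains a deprecated keyword
# from its **kwargs and gets the old value back:
#
#   def new_call(x=None, **kwargs):
#       old_arg = deprecate("old_arg", "99.0.0", "Use `x` instead.", take_from=kwargs)
#       return old_arg if old_arg is not None else x
#
#   new_call(x=1)        # -> 1, no warning
#   new_call(old_arg=2)  # -> 2, emits a FutureWarning with the message above
#
# Any key left in `take_from` after the loop raises a TypeError naming the caller.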
| 53 |
def combination_util(arr, n, r, index, data, i) -> None:
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r) -> None:
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
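# Cross-check (a minimal sketch, standard library only): itertools enumerates
# the same C(5, 3) = 10 subsets that print_combination prints above.
from itertools import combinations

assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10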
| 20 | 0 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : str = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''owlvit_text_model'''
def __init__( self : Tuple , __magic_name__ : List[Any]=49_408 , __magic_name__ : str=512 , __magic_name__ : Dict=2_048 , __magic_name__ : Dict=12 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=16 , __magic_name__ : Optional[Any]="quick_gelu" , __magic_name__ : Optional[Any]=1e-5 , __magic_name__ : Union[str, Any]=0.0 , __magic_name__ : str=0.02 , __magic_name__ : List[str]=1.0 , __magic_name__ : Optional[int]=0 , __magic_name__ : Tuple=49_406 , __magic_name__ : Optional[Any]=49_407 , **__magic_name__ : List[str] , ) -> Tuple:
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_factor
@classmethod
def __A ( cls : List[str] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : str ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
SCREAMING_SNAKE_CASE_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''owlvit_vision_model'''
def __init__( self : List[Any] , __magic_name__ : int=768 , __magic_name__ : Dict=3_072 , __magic_name__ : Any=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : List[Any]=3 , __magic_name__ : List[Any]=768 , __magic_name__ : Any=32 , __magic_name__ : Tuple="quick_gelu" , __magic_name__ : List[Any]=1e-5 , __magic_name__ : Tuple=0.0 , __magic_name__ : Any=0.02 , __magic_name__ : Union[str, Any]=1.0 , **__magic_name__ : Optional[int] , ) -> Tuple:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_factor
@classmethod
def __A ( cls : Union[str, Any] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Tuple ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
SCREAMING_SNAKE_CASE_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''owlvit'''
lowerCamelCase__ = True
def __init__( self : List[Any] , __magic_name__ : Any=None , __magic_name__ : str=None , __magic_name__ : Optional[int]=512 , __magic_name__ : Any=2.6592 , __magic_name__ : Optional[int]=True , **__magic_name__ : Union[str, Any] , ) -> Any:
super().__init__(**__magic_name__ )
if text_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
SCREAMING_SNAKE_CASE_ = OwlViTTextConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = OwlViTVisionConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = projection_dim
SCREAMING_SNAKE_CASE_ = logit_scale_init_value
SCREAMING_SNAKE_CASE_ = return_dict
SCREAMING_SNAKE_CASE_ = 1.0
@classmethod
def __A ( cls : Optional[int] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Union[str, Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
@classmethod
def __A ( cls : str , __magic_name__ : Dict , __magic_name__ : Dict , **__magic_name__ : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = text_config
SCREAMING_SNAKE_CASE_ = vision_config
return cls.from_dict(__magic_name__ , **__magic_name__ )
def __A ( self : str ) -> Any:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def __A ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def __A ( self : int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def __A ( self : Dict ) -> float:
return 1e-4
def __A ( self : Any , __magic_name__ : "ProcessorMixin" , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(
processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
return {**text_input_dict, **image_input_dict}
@property
def __A ( self : Union[str, Any] ) -> int:
return 14
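# Hedged usage sketch: the three classes above correspond to OwlViTTextConfig,
# OwlViTVisionConfig and OwlViTConfig in `transformers`. Composing a joint
# config mirrors the `from_text_vision_configs`-style classmethod defined above:
#
#   from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
#
#   text_cfg = OwlViTTextConfig()    # defaults as in the __init__ signatures above
#   vision_cfg = OwlViTVisionConfig()
#   config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())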
| 305 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
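# Typical invocations of the command wired up above (shell):
#   accelerate config                           # interactive prompts, writes default_config.yaml
#   accelerate config --config_file my.yaml     # write the answers to an explicit path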
| 305 | 1 |
from __future__ import annotations

import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        # k is the Harris free parameter, conventionally 0.04 or 0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[np.ndarray, list[list[float]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[float]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the sensitivity validated in __init__
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img[y, x] = (0, 0, 255)  # highlight the detected corner
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
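# What detect() computes, for reference: per window W the 2x2 structure tensor
#   M = [[sum_W Ix*Ix, sum_W Ix*Iy],
#        [sum_W Ix*Iy, sum_W Iy*Iy]]
# and the Harris response R = det(M) - k * trace(M)**2; R > 0 marks a corner,
# R < 0 an edge, and |R| near 0 a flat region.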
| 322 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ ( snake_case__ ):
_lowercase : int = (DPMSolverSinglestepScheduler,)
_lowercase : Optional[Any] = (('num_inference_steps', 2_5),)
def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
__lowerCAmelCase: Union[str, Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any:
__lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs )
__lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: int = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str ) -> str:
pass
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple:
__lowerCAmelCase: Tuple = dict(self.forward_default_kwargs )
__lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Tuple = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
if scheduler is None:
__lowerCAmelCase: str = self.scheduler_classes[0]
__lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.scheduler_classes[0]
__lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = 1_0
__lowerCAmelCase: Dict = self.dummy_model()
__lowerCAmelCase: Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Any = 5_0
__lowerCAmelCase: int = self.dummy_model()
__lowerCAmelCase: List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
__lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : List[str] ) -> List[str]:
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
__lowerCAmelCase: Dict = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self : Optional[Any] ) -> str:
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def UpperCAmelCase ( self : str ) -> Any:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase ( self : List[Any] ) -> str:
        self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type='learned_range' )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def UpperCAmelCase ( self : Any ) -> int:
__lowerCAmelCase: Any = self.full_loop()
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCAmelCase ( self : str ) -> List[str]:
__lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: Any = self.scheduler_classes[0]
__lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = 1_0
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: int = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
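# Minimal sketch of driving the scheduler under test outside the test harness
# (assumes `diffusers` and `torch`; the zero "model output" is a stand-in for
# a real denoising network):
#
#   import torch
#   from diffusers import DPMSolverSinglestepScheduler
#
#   scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = torch.zeros_like(sample)  # a real denoiser goes here
#       sample = scheduler.step(model_output, t, sample).prev_sample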
| 322 | 1 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
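# Worked example: for the input "a+b*(c^d-e)" the functions above print the
# conversion table and yield postfix "abcd^e-*+" and prefix "+a*b-^cde".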
| 358 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __a :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=30 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=None , lowerCAmelCase__=2 , ) -> List[str]:
'''simple docstring'''
lowercase__: List[str] = parent
lowercase__: Tuple = batch_size
lowercase__: Optional[Any] = image_size
lowercase__: int = patch_size
lowercase__: Union[str, Any] = num_channels
lowercase__: Dict = is_training
lowercase__: Any = use_labels
lowercase__: Optional[int] = hidden_size
lowercase__: Tuple = num_hidden_layers
lowercase__: List[str] = num_attention_heads
lowercase__: Any = intermediate_size
lowercase__: Union[str, Any] = hidden_act
lowercase__: Union[str, Any] = hidden_dropout_prob
lowercase__: Optional[Any] = attention_probs_dropout_prob
lowercase__: str = type_sequence_label_size
lowercase__: List[str] = initializer_range
lowercase__: Optional[Any] = scope
lowercase__: Dict = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowercase__: List[str] = (image_size // patch_size) ** 2
lowercase__: Tuple = num_patches + 2
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__: int = None
if self.use_labels:
lowercase__: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: List[str] = TFDeiTModel(config=lowerCAmelCase__ )
lowercase__: Any = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Any = TFDeiTForMaskedImageModeling(config=lowerCAmelCase__ )
lowercase__: int = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__: Tuple = 1
lowercase__: Optional[int] = TFDeiTForMaskedImageModeling(lowerCAmelCase__ )
lowercase__: int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__: str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
lowercase__: Any = self.type_sequence_label_size
lowercase__: Optional[Any] = TFDeiTForImageClassification(lowerCAmelCase__ )
lowercase__: List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__: Optional[Any] = 1
lowercase__: Union[str, Any] = TFDeiTForImageClassification(lowerCAmelCase__ )
lowercase__: List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__: str = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__: Dict = config_and_inputs
lowercase__: Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __a ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
__lowercase : Union[str, Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__lowercase : Union[str, Any] = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__lowercase : Union[str, Any] = False
__lowercase : Tuple = False
__lowercase : int = False
__lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: List[Any] = TFDeiTModelTester(self )
lowercase__: int = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ , lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Tuple = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__: Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , tf.keras.layers.Dense ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ , lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Any = model_class(lowerCAmelCase__ )
lowercase__: Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__: Dict = [*signature.parameters.keys()]
lowercase__: Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__: Union[str, Any] = TFDeiTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def snake_case_ ( ) -> Tuple:
lowercase__: Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowercase__: str = self.default_image_processor
lowercase__: Dict = prepare_img()
lowercase__: Optional[int] = image_processor(images=lowerCAmelCase__ , return_tensors='tf' )
# forward pass
lowercase__: int = model(**lowerCAmelCase__ )
# verify the logits
lowercase__: Tuple = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
lowercase__: List[Any] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
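# Standalone version of the integration check above (assumes `transformers`,
# TensorFlow, and Hub access; this mirrors the slow test, not new behaviour):
#
#   from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher
#
#   name = "facebook/deit-base-distilled-patch16-224"
#   processor = DeiTImageProcessor.from_pretrained(name)
#   model = TFDeiTForImageClassificationWithTeacher.from_pretrained(name)
#   inputs = processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs).logits  # shape (1, 1000)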
| 288 | 0 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
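# Worked example from the problem statement: below a ceiling of 100 the longest
# run of consecutive primes summing to a prime is 2 + 3 + 5 + 7 + 11 + 13 = 41.
assert solution(100) == 41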
| 234 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase__ = logging.getLogger()
lowerCamelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[int] ) ->Tuple:
'''simple docstring'''
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = {"source": "What is love ?", "target": "life"}
_UpperCAmelCase : Any = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_UpperCAmelCase : Dict = "\n".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowerCamelCase__ , F"""{split}.{field}""" ) , "w" ) as f:
f.write(lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : int , lowerCamelCase__ : str = "pytorch" ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : int = os.path.join(lowerCamelCase__ , "output" )
_UpperCAmelCase : Tuple = os.path.join(lowerCamelCase__ , "data" )
self._create_dummy_data(data_dir=lowerCamelCase__ )
_UpperCAmelCase : str = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
_UpperCAmelCase : str = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowerCamelCase__ , env=self.get_env() )
_UpperCAmelCase : Optional[int] = os.path.join(lowerCamelCase__ , "metrics.json" )
with open(lowerCamelCase__ ) as f:
_UpperCAmelCase : Dict = json.load(lowerCamelCase__ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : int ) ->str:
'''simple docstring'''
_UpperCAmelCase : Any = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 234 | 1 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
UpperCAmelCase = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
UpperCAmelCase = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def lowercase ( a__ : List[str] , a__ : Tuple ) -> List[str]:
with open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = collections.OrderedDict()
_UpperCamelCase = collections.OrderedDict()
_UpperCamelCase = collections.OrderedDict()
with open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = [[t.rstrip('''\n''' )] if (t == ',' or ',' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(_UpperCAmelCase ):
_UpperCamelCase = b
_UpperCamelCase = idx
for wd in b:
_UpperCamelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : int="<|endoftext|>" , __UpperCamelCase : List[str]="<|endoftext|>" , __UpperCamelCase : Tuple="<|startoftext|>" , __UpperCamelCase : int="<|endoftext|>" , __UpperCamelCase : Dict=False , **__UpperCamelCase : Any , ) -> Any:
super().__init__(
unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , do_clean_text=__UpperCamelCase , **__UpperCamelCase , )
if not os.path.isfile(__UpperCamelCase ):
raise ValueError(
F'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
                ''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(__UpperCamelCase ):
raise ValueError(
F'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
                ''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
_UpperCamelCase = do_clean_text
_UpperCamelCase = load_vocab_and_emoji(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _UpperCamelCase ( self : str ) -> str:
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def _UpperCamelCase ( self : Any ) -> Optional[Any]:
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : int ) -> Optional[Any]:
return self.subword_tokenizer.tokenize(__UpperCamelCase , clean=self.do_clean_text )
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Any ) -> Optional[Any]:
return self.vocab.get(__UpperCamelCase , self.vocab.get(self.unk_token ) )
def _UpperCamelCase ( self : Any , __UpperCamelCase : Any ) -> Union[str, Any]:
return self.subword_tokenizer.convert_id_to_token(__UpperCamelCase )
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase = ''.join(__UpperCamelCase ).strip()
return out_string
def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : "Conversation" ) -> List[int]:
_UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
def _UpperCamelCase ( self : int , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
_UpperCamelCase = 0
if os.path.isdir(__UpperCamelCase ):
_UpperCamelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
_UpperCamelCase = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
_UpperCamelCase = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
_UpperCamelCase = token_index
writer.write(''','''.join(__UpperCamelCase ) + '''\n''' )
index += 1
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
json.dump(self.emoji , __UpperCamelCase )
return vocab_file, emoji_file
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__):
def __init__( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Tuple ) -> int:
_UpperCamelCase = vocab # same as swe
_UpperCamelCase = ids_to_tokens # same as bpe
_UpperCamelCase = emoji
_UpperCamelCase = np.max([len(__UpperCamelCase ) for w in self.vocab.keys()] )
_UpperCamelCase = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
_UpperCamelCase = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
_UpperCamelCase = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
_UpperCamelCase = re.compile(
R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
_UpperCamelCase = re.compile(
R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
_UpperCamelCase = re.compile(
R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
_UpperCamelCase = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
_UpperCamelCase = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
_UpperCamelCase = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self : List[Any] ) -> Optional[int]:
return len(self.ids_to_tokens )
def _UpperCamelCase ( self : int , __UpperCamelCase : Optional[int] ) -> Any:
_UpperCamelCase = self.content_repattera.sub('''<URL>''' , __UpperCamelCase )
_UpperCamelCase = self.content_repattera.sub('''<EMAIL>''' , __UpperCamelCase )
_UpperCamelCase = self.content_repattera.sub('''<TEL>''' , __UpperCamelCase )
_UpperCamelCase = self.content_repattera.sub('''<DATE>''' , __UpperCamelCase )
_UpperCamelCase = self.content_repattera.sub('''<DATE>''' , __UpperCamelCase )
_UpperCamelCase = self.content_repattera.sub('''<PRICE>''' , __UpperCamelCase )
_UpperCamelCase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_UpperCamelCase = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
return content
def _UpperCamelCase ( self : str , __UpperCamelCase : Any , __UpperCamelCase : Any=False ) -> str:
_UpperCamelCase = text.replace(''' ''' , '''<SP>''' )
_UpperCamelCase = text.replace(''' ''' , '''<SP>''' )
_UpperCamelCase = text.replace('''\r\n''' , '''<BR>''' )
_UpperCamelCase = text.replace('''\n''' , '''<BR>''' )
_UpperCamelCase = text.replace('''\r''' , '''<BR>''' )
_UpperCamelCase = text.replace('''\t''' , '''<TAB>''' )
_UpperCamelCase = text.replace('''—''' , '''ー''' )
_UpperCamelCase = text.replace('''−''' , '''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
_UpperCamelCase = text.replace(__UpperCamelCase , __UpperCamelCase )
if clean:
_UpperCamelCase = self.clean_text(__UpperCamelCase )
def check_simbol(__UpperCamelCase : Optional[Any] ):
_UpperCamelCase = x.encode()
if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 2:
_UpperCamelCase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(__UpperCamelCase : Union[str, Any] ):
_UpperCamelCase = x.encode()
if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 3:
_UpperCamelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe2_8080 and c <= 0xe2_b07f:
return True
return False
_UpperCamelCase = 0
_UpperCamelCase = []
while pos < len(__UpperCamelCase ):
_UpperCamelCase = min(len(__UpperCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
_UpperCamelCase = [] # (token_id, token, pos)
for e in range(__UpperCamelCase , __UpperCamelCase , -1 ):
_UpperCamelCase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__UpperCamelCase ) > 2:
_UpperCamelCase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__UpperCamelCase ) > 0:
# the smallest token_id is adopted
_UpperCamelCase = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[0] )[0]
result.append(__UpperCamelCase )
_UpperCamelCase = e
else:
_UpperCamelCase = pos + 1
_UpperCamelCase = text[pos:end]
if check_simbol(__UpperCamelCase ):
result.append('''<KIGOU>''' )
elif checkuae(__UpperCamelCase ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
_UpperCamelCase = end
return result
def _UpperCamelCase ( self : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[Any]="\n" ) -> List[str]:
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__UpperCamelCase ) > 0:
words.append(bytearray(__UpperCamelCase ).decode('''utf-8''' , errors='''replace''' ) )
_UpperCamelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(__UpperCamelCase )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
words.append(bytearray(__UpperCamelCase ).decode('''utf-8''' , errors='''replace''' ) )
_UpperCamelCase = ''.join(__UpperCamelCase )
return text
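# Hedged usage sketch (assumes the `transformers` package, where this class is
# exposed as GPTNeoXJapaneseTokenizer, plus Hub access):
#
#   from transformers import GPTNeoXJapaneseTokenizer
#
#   tok = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tok("こんにちは")["input_ids"]
#   print(tok.decode(ids))  # round-trips back to the input text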
| 359 | """simple docstring"""
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs among the first `points_counts` points
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # inside the vertical strip only a constant number of neighbours matter
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
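# For the sample points above the closest pair is (2, 3) and (3, 4), so the
# driver prints Distance: 1.4142135623730951, i.e. sqrt(2).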
| 54 | 0 |
"""simple docstring"""
import sys
def matrix_chain_order( array ):
    '''simple docstring'''
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution( optimal_solution , i , j ):
    '''simple docstring'''
    if i == j:
        print("A" + str(i ) , end=" " )
    else:
        print("(" , end=" " )
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(")" , end=" " )


def main( ):
    '''simple docstring'''
    array = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , sol = matrix_chain_order(array )

    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(sol , 1 , n - 1 )
if __name__ == "__main__":
main()
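# Worked example (the classic CLRS 15.2 instance, added for clarity): with
# dims [30, 35, 15, 5, 10, 20, 25] the chain is A1(30x35) A2(35x15) A3(15x5)
# A4(5x10) A5(10x20) A6(20x25). The recurrence
#     matrix[a][b] = min over c of matrix[a][c] + matrix[c+1][b]
#                    + array[a-1] * array[c] * array[b]
# gives 15125 scalar multiplications for the optimal split
# ((A1(A2A3))((A4A5)A6)), which is what main() prints.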
| 108 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase):


    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True


    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )


    def test_convert_token_and_id( self ):
        token = '''<pad>'''
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )


    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(vocab_keys ) , 1002 )


    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
    def test_full_tokenizer( self ):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )

        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained( self ):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2 )

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )

                shutil.rmtree(tmpdirname2 )

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )

                shutil.rmtree(tmpdirname2 )
    @cached_property
    def big_tokenizer( self ):
        return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )


    def test_picklable_without_disk( self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )


    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''I was born in 92000, and this is falsé.'''

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self ):
# fmt: off
snake_case_ : int = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
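    # Note on the expected ids above (added for clarity): XLMRobertaTokenizer
    # shifts every raw SentencePiece id by tokenizer.fairseq_offset (1) so that
    # ids 0-3 stay reserved for <s>, <pad>, </s> and <unk>; that is why the
    # integration lists add the offset and why unknown pieces map to id 3.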
| 264 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Dict = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
def lowerCamelCase__ ( *lowercase , **kwargs ):
    """simple docstring"""
    requires_backends(lowercase , ["torch"] )


def lowerCamelCase__ ( *lowercase , **kwargs ):
    """simple docstring"""
    requires_backends(lowercase , ["torch"] )


def lowerCamelCase__ ( *lowercase , **kwargs ):
    """simple docstring"""
    requires_backends(lowercase , ["torch"] )


def lowerCamelCase__ ( *lowercase , **kwargs ):
    """simple docstring"""
    requires_backends(lowercase , ["torch"] )


def lowerCamelCase__ ( *lowercase , **kwargs ):
    """simple docstring"""
    requires_backends(lowercase , ["torch"] )


def lowerCamelCase__ ( *lowercase , **kwargs ):
    """simple docstring"""
    requires_backends(lowercase , ["torch"] )


def lowerCamelCase__ ( *lowercase , **kwargs ):
    """simple docstring"""
    requires_backends(lowercase , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : str = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''torch''']
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : str = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : int = ['''torch''']
def __init__( self : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Tuple = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ['''torch''']
def __init__( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : int = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''torch''']
def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : str = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Dict = ['''torch''']
def __init__( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''torch''']
def __init__( self : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : str , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
'''simple docstring'''
UpperCamelCase_ : List[str] = ['''torch''']
def __init__( self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def _A ( cls : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ["torch"] )
| 352 |
def perfect_cube( n ):
    """simple docstring"""
    val = round(n ** (1 / 3) )
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
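# Why round() above (added for clarity): the floating-point cube root is
# inexact — 27 ** (1 / 3) evaluates to 3.0000000000000004 — so cubing it
# directly never compares equal to n. A sketch of a more defensive variant
# for non-negative n (illustrative, not part of the original file):
#
#     def perfect_cube_checked(n: int) -> bool:
#         root = round(n ** (1 / 3))
#         return any((root + d) ** 3 == n for d in (-1, 0, 1))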
| 319 | 0 |
def triangle_number_generator( ):
    """simple docstring"""
    for n in range(1 , 100_0000 ):
        yield n * (n + 1) // 2


def count_divisors( n ):
    """simple docstring"""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution( ):
    """simple docstring"""
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
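# Why prime factorisation (added for clarity): if n = p1**a1 * p2**a2 * ...,
# the divisor count is (a1 + 1) * (a2 + 1) * ...; e.g. 28 = 2**2 * 7 has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28), matching the example
# in the Project Euler problem 12 statement this script solves.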
| 305 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )

    special_keys = ["""key_proj""", """value_proj""", """query_proj"""]

    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
lowercase__ = key.split(""".""" )
if attributes[0] == "lm_head":
lowercase__ = prophet
lowercase__ = prophet_old
else:
lowercase__ = prophet.prophetnet
lowercase__ = prophet_old.model
lowercase__ = False
for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            else:
                old_attribute = attribute

            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
lowercase__ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ = old_model.bias
logger.info(f'''{attribute} is initialized''' )
lowercase__ = True
break
            elif attribute in special_keys and hasattr(old_model , """in_proj_weight""" ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )

                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(attribute )]
            else:
                model = getattr(model , attribute )

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f'''{old_model} does not have {old_attribute}''' )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(pytorch_dump_folder_path )
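# Why the in_proj split above (added for clarity): fairseq stores the
# query/key/value projections of each attention block as one fused
# in_proj_weight of shape (3 * embed_dim, embed_dim); the converter slices
# rows [0:d], [d:2d] and [2d:3d] to populate the separate q/k/v projection
# layers that the Hugging Face model expects.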
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 305 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( density : float , bulk_modulus : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
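# Illustrative numbers (not from this file): water has a bulk modulus of
# roughly 2.2e9 Pa and a density of roughly 1000 kg/m^3, so the function
# above returns (2.2e9 / 1000) ** 0.5 ≈ 1483 m/s, close to the accepted
# speed of sound in water.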
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 |
'''simple docstring'''
from __future__ import annotations
def ceil_index( v , left , right , key ):
    '''simple docstring'''
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def SCREAMING_SNAKE_CASE__( v : list[int] ) -> int:
    '''simple docstring'''
    if len(v ) == 0:
        return 0

    tail = [0] * len(v )
    length = 1

    tail[0] = v[0]

    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]

    return length
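# Illustrative trace (added for clarity): tail[k] always holds the smallest
# possible tail value of an increasing subsequence of length k + 1, so the
# array stays sorted and ceil_index can binary-search it. For
# v = [2, 5, 3, 7, 11, 8, 10, 13, 6] the returned length is 6, e.g. the
# subsequence 2, 3, 7, 8, 10, 13.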
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = """markuplm"""

    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=2_56 , max_xpath_subs_unit_embeddings=10_24 , tag_pad_id=2_16 , subs_pad_id=10_01 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
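# Minimal usage sketch (added for clarity; the defaults above mirror the
# microsoft/markuplm-base checkpoint):
#
#     config = MarkupLMConfig()
#     assert config.xpath_unit_hidden_size == 32 and config.max_depth == 50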
| 57 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
    tokenizer = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 288 | 0 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
ETAOIN = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def get_letter_count( message : str ):
    '''simple docstring'''
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_zero( items : tuple ):
    '''simple docstring'''
    return items[0]


def get_frequency_order( message : str ):
    '''simple docstring'''
    letter_to_freq = get_letter_count(message )
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )

    freq_to_letter_str = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq] )

    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_zero , reverse=True )

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order )


def english_freq_match_score( message : str ):
    '''simple docstring'''
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
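# How the score is used (added for clarity): classic substitution-cipher
# breakers call english_freq_match_score on every candidate decryption and
# keep the keys that score highest; a text whose six most and six least
# frequent letters both match ETAOIN exactly reaches the maximum score of 12.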
| 360 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
BPE_TOKEN_MERGES = '''</w>'''
BPE_TOKEN_VOCAB = '''@@ '''
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/s2t-wav2vec2-large-en-de''': 1_024}


class Speech2Text2Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , a , a="<s>" , a="<pad>" , a="</s>" , a="<unk>" , a=False , a=None , **a , ):
super().__init__(
unk_token=a , bos_token=a , eos_token=a , pad_token=a , do_lower_case=a , **a , )
lowercase__ : str = do_lower_case
with open(a , encoding='utf-8') as vocab_handle:
lowercase__ : Tuple = json.load(a)
lowercase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""")
lowercase__ : int = None
lowercase__ : List[Any] = None
else:
with open(a , encoding='utf-8') as merges_handle:
lowercase__ : List[Any] = merges_handle.read().split('\n')[:-1]
lowercase__ : Optional[int] = [tuple(merge.split()[:2]) for merge in merges]
lowercase__ : Tuple = dict(zip(a , range(len(a))))
lowercase__ : List[str] = {}
    @property
    def vocab_size( self ) -> int:
        return len(self.decoder)

    def get_vocab( self ) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder)
    def bpe( self , token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first , i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES , '')

        word = word.replace(' ' , BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize( self , text):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.')
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id( self , token):
        return self.encoder.get(token , self.encoder.get(self.unk_token))
    def _convert_id_to_token( self , index):
        result = self.decoder.get(index , self.unk_token)
        return result
    def convert_tokens_to_string( self , tokens):
        string = ' '.join(tokens)
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merges_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file , 'w' , encoding='utf-8') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False) + '\n')
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , 'w' , encoding='utf-8') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merges_file)
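# A minimal round-trip sketch (hedged: the file paths are hypothetical and a
# vocab.json/merges.txt pair in the Speech2Text2 format is assumed):
# tokenizer = SCREAMING_SNAKE_CASE__("vocab.json", merges_file="merges.txt")
# tokens = tokenizer._tokenize("hello world")
# print(tokenizer.convert_tokens_to_string(tokens))  # -> "hello world"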
| 216 | 0 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache( Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n : int):
        """simple docstring"""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''')
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x : T):
        """simple docstring"""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)
    def display( self):
        """simple docstring"""
        for k in self.dq_store:
            print(k)
    def __repr__( self):
        """simple docstring"""
        return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 51 |
"""simple docstring"""
import math
import random
def sigmoid_function(value , deriv = False ):
    '''simple docstring'''
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected , number_propagations ):
    '''simple docstring'''
    # Random weight
    weight = float(2 * (random.randint(1 , 100 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1 , True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
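# A non-interactive sketch (hedged: bounds follow the upstream doctest for this
# algorithm; seeding makes the random weight initialisation repeatable):
# random.seed(0)
# result = forward_propagation(32, 450_000)
# assert 31 < result < 33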
| 54 | 0 |
def equation( x ) -> float:
    return 10 - x * x
def bisection( a , b ) -> float:
    # Bolzano theorem in order to find if there is a root between a and b
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
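# Both calls above bracket the positive root of 10 - x**2 = 0, so each printed
# value should lie within the 0.01 tolerance of sqrt(10) ~ 3.1623.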
| 140 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        '''simple docstring'''
        results = spearmanr(predictions , references )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 140 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class T5Tokenizer (PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self ,vocab_file ,eos_token="</s>" ,unk_token="<unk>" ,pad_token="<pad>" ,extra_ids=1_0_0 ,additional_special_tokens=None ,sp_model_kwargs: Optional[Dict[str, Any]] = None ,legacy=True ,**kwargs ,):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"<extra_id_{i}>" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool('''extra_id''' in str(x ) ) ,additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        if legacy:
            logger.warning_once(
                F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                ''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token ,unk_token=unk_token ,pad_token=pad_token ,extra_ids=extra_ids ,additional_special_tokens=additional_special_tokens ,sp_model_kwargs=self.sp_model_kwargs ,legacy=legacy ,**kwargs ,)
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path ,max_model_length ,init_max_model_length ):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    '''This tokenizer was incorrectly instantiated with a model max length of'''
                    F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
                    ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
                    F" {pretrained_model_name_or_path} automatically truncating your input to"
                    F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' ,FutureWarning ,)
        return max_model_length
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def get_special_tokens_mask( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def get_sentinel_tokens( self ):
        return list(
            set(filter(lambda token: bool(re.search(r'''<extra_id_\d+>''' ,token ) ) is not None ,self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self ,token_ids: List[int] ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                ''' eos tokens being added.''' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self ,d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def tokenize( self ,text: "TextInput" ,**kwargs ):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE ,''' ''' )
        return super().tokenize(text ,**kwargs )
    def _tokenize( self ,text ,**kwargs ):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text ,out_type=str )
        if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id( self ,token ):
        if token.startswith('''<extra_id_''' ):
            match = re.match(r'''<extra_id_(\d+)>''' ,token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self ,index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        else:
            token = F"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string( self ,tokens ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
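# Note on the sentinel mapping implemented above: `<extra_id_0>` takes the *last*
# id in the vocabulary, i.e. _convert_token_to_id("<extra_id_0>") == vocab_size - 1,
# and `<extra_id_99>` maps to vocab_size - 100 with the default 100 extra ids.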
| 104 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 2
    @register_to_config
    def __init__( self , sigma_min: float = 0.02 , sigma_max: float = 1_00 , s_noise: float = 1.007 , s_churn: float = 80 , s_min: float = 0.05 , s_max: float = 50 , ) -> Optional[int]:
        '''simple docstring'''
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )
    def add_noise_to_input( self , sample: torch.FloatTensor , sigma: float , generator: Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
        '''simple docstring'''
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , return_dict: bool = True , ) -> Union[KarrasVeOutput, Tuple]:
        '''simple docstring'''
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def step_correct( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , sample_prev: torch.FloatTensor , derivative: torch.FloatTensor , return_dict: bool = True , ) -> Union[KarrasVeOutput, Tuple]:
        '''simple docstring'''
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples , noise , timesteps ):
        '''simple docstring'''
        raise NotImplementedError()
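# A minimal denoising-loop sketch (hedged: mirrors how diffusers' KarrasVePipeline
# drives this scheduler; `unet` is a hypothetical denoiser, not defined here):
# scheduler = KarrasVeScheduler()
# scheduler.set_timesteps(50)
# sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     sigma = scheduler.schedule[t]
#     sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#     sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#     model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
#     sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample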
| 319 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("""T""")
class SegmentTree( Generic[T] ):
    '''simple docstring'''
    def __init__( self , arr: list[T] , fnc: Callable[[T, T], T] ):
        any_type: Any | T = None
        self.N: int = len(arr )
        self.st: list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ):
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p: int , v: T ):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l: int , r: int ):  # noqa: E741
        l , r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """simple docstring"""
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
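    # Note: `query` uses inclusive bounds, so sum_segment_tree.query(0, len(test_array) - 1)
    # folds the whole array; the brute-force `reduce` checks above rely on exactly that.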
| 350 |
"""simple docstring"""
import qiskit
def half_adder( bita: int , bitb: int ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
| 161 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ (metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["speech"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["speech"] )
class lowerCamelCase_a (metaclass=DummyObject ):  # distinct placeholder name; original dummy class name not recoverable here
    '''simple docstring'''
    _backends = ["speech"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["speech"] )
| 31 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_gpt_neox_fast"""] = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neox"""] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 31 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput( BaseOutput ):
    '''simple docstring'''
    latent_dist: "DiagonalGaussianDistribution"
class AutoencoderKL( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 4 , norm_num_groups: int = 32 , sample_size: int = 32 , scaling_factor: float = 0.18215 , ):
        """simple docstring"""
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing( self , module , value=False ):
        """simple docstring"""
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value
    def enable_tiling( self , use_tiling: bool = True ):
        """simple docstring"""
        self.use_tiling = use_tiling
    def disable_tiling( self ):
        """simple docstring"""
        self.enable_tiling(False )
    def enable_slicing( self ):
        """simple docstring"""
        self.use_slicing = True
    def disable_slicing( self ):
        """simple docstring"""
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCAmelCase ( self : Union[str, Any] ) ->Dict[str, AttentionProcessor]:
"""simple docstring"""
a = {}
def fn_recursive_add_processors(__UpperCAmelCase : str , __UpperCAmelCase : torch.nn.Module , __UpperCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(__UpperCAmelCase , '''set_processor''' ):
a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return processors
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) ->Any:
"""simple docstring"""
a = len(self.attn_processors.keys() )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(__UpperCAmelCase : str , __UpperCAmelCase : torch.nn.Module , __UpperCAmelCase : Dict ):
if hasattr(__UpperCAmelCase , '''set_processor''' ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
module.set_processor(__UpperCAmelCase )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
    @apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ) ->AutoencoderKLOutput:
        """simple docstring"""
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def _decode( self , z: torch.FloatTensor , return_dict: bool = True ) ->Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    @apply_forward_hook
    def decode( self , z: torch.FloatTensor , return_dict: bool = True ) ->Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
    def blend_v( self , a , b , blend_extent ):
        """simple docstring"""
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h( self , a , b , blend_extent ):
        """simple docstring"""
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = True ) ->AutoencoderKLOutput:
"""simple docstring"""
a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
a = int(self.tile_latent_min_size * self.tile_overlap_factor )
a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
a = []
for i in range(0 , x.shape[2] , __UpperCAmelCase ):
a = []
for j in range(0 , x.shape[3] , __UpperCAmelCase ):
a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
a = self.encoder(__UpperCAmelCase )
a = self.quant_conv(__UpperCAmelCase )
row.append(__UpperCAmelCase )
rows.append(__UpperCAmelCase )
a = []
for i, row in enumerate(__UpperCAmelCase ):
a = []
for j, tile in enumerate(__UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
a = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
if j > 0:
a = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
a = torch.cat(__UpperCAmelCase , dim=2 )
a = DiagonalGaussianDistribution(__UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = True ) ->Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
a = int(self.tile_sample_min_size * self.tile_overlap_factor )
a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
a = []
for i in range(0 , z.shape[2] , __UpperCAmelCase ):
a = []
for j in range(0 , z.shape[3] , __UpperCAmelCase ):
a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
a = self.post_quant_conv(__UpperCAmelCase )
a = self.decoder(__UpperCAmelCase )
row.append(__UpperCAmelCase )
rows.append(__UpperCAmelCase )
a = []
for i, row in enumerate(__UpperCAmelCase ):
a = []
for j, tile in enumerate(__UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
a = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
if j > 0:
a = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
a = torch.cat(__UpperCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
    def forward( self , sample: torch.FloatTensor , sample_posterior: bool = False , return_dict: bool = True , generator: Optional[torch.Generator] = None , ) ->Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        x = sample
        posterior = self.encode(x ).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator )
        else:
            z = posterior.mode()
        dec = self.decode(z ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
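# A usage sketch for the encode/decode path restored above (hedged: `vae` stands
# for an instance of AutoencoderKL with trained weights; shapes are illustrative):
# vae = AutoencoderKL()
# posterior = vae.encode(torch.randn(1, 3, 64, 64)).latent_dist
# recon = vae.decode(posterior.sample()).sample  # same shape as the input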
| 26 |
from math import ceil, sqrt
def solution( limit: int = 1_000_000 ) -> int:
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
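# Sanity check from the Project Euler 173 statement: using up to one hundred
# tiles, forty-one different square laminae can be formed, i.e. solution(100) == 41.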
| 26 | 1 |
"""simple docstring"""
def merge_sort( collection ) -> list:
    def merge( left , right ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 177 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ):
# vision encoder
if "img_encoder.pos_embed" in name:
__a : Any = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
__a : str = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
__a : int = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
__a : Union[str, Any] = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
__a : List[Any] = name.replace('''blocks''' , '''layers''' )
if "attn" in name and "pre_assign" not in name:
__a : Tuple = name.replace('''attn''' , '''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
__a : List[Any] = name.replace('''proj''' , '''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
__a : Any = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
__a : Union[str, Any] = name.replace('''norm1''' , '''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
__a : Optional[int] = name.replace('''norm2''' , '''layer_norm2''' )
if "img_encoder.norm" in name:
__a : Union[str, Any] = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
__a : List[Any] = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
__a : Any = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
__a : Any = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' )
if "ln_1" in name:
__a : str = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
__a : Union[str, Any] = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
__a : Union[str, Any] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
__a : Union[str, Any] = name.replace('''c_proj''' , '''fc2''' )
if "text_encoder" in name:
__a : Optional[int] = name.replace('''text_encoder''' , '''text_model''' )
if "ln_final" in name:
__a : str = name.replace('''ln_final''' , '''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
__a : List[str] = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' )
if "img_projector.linear_out." in name:
__a : str = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
__a : int = name.replace('''text_projector.linear_hidden''' , '''text_projection''' )
if "text_projector.linear_out" in name:
__a : List[str] = name.replace('''text_projector.linear_out''' , '''text_projection.3''' )
return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('''.''' )
            stage_num , layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
    image = prepare_img()
    inputs = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=image , padding=True , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]] )
    else:
        raise ValueError(f"Model name {model_name} not supported." )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('''Successfully saved processor and model to''' , pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        processor.push_to_hub(model_name , organization='''nielsr''' )
        model.push_to_hub(model_name , organization='''nielsr''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 216 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder' ):
            key = key.replace('module.encoder' , 'glpn.encoder' )
        if key.startswith('module.decoder' ):
            key = key.replace('module.decoder' , 'decoder.stages' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed' ) + len('patch_embed' )]
            key = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(idx )-1}" )
        if "norm" in key:
            key = key.replace('norm' , 'layer_norm' )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
            key = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(idx )-1}" )
        if "layer_norm1" in key:
            key = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            key = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block' ) + len('block' )]
            key = key.replace(F"block{idx}" , F"block.{int(idx )-1}" )
        if "attn.q" in key:
            key = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            key = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            key = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            key = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            key = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            key = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv' , 'linear_fuse' )
            key = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c' ) + len('linear_c' )]
            key = key.replace(F"linear_c{idx}" , F"linear_c.{int(idx )-1}" )
        if "bot_conv" in key:
            key = key.replace('bot_conv' , '0.convolution' )
        if "skip_conv1" in key:
            key = key.replace('skip_conv1' , '1.convolution' )
        if "skip_conv2" in key:
            key = key.replace('skip_conv2' , '2.convolution' )
        if "fusion1" in key:
            key = key.replace('fusion1' , '1.fusion' )
        if "fusion2" in key:
            key = key.replace('fusion2' , '2.fusion' )
        if "fusion3" in key:
            key = key.replace('fusion3' , '3.fusion' )
        if "fusion" in key and "conv" in key:
            key = key.replace('conv' , 'convolutional_layer' )
        if key.startswith('module.last_layer_depth' ):
            key = key.replace('module.last_layer_depth' , 'head.head' )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict , config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" )
            kv_bias = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" )
            # next, add keys and values (in that order) to the state dict
            state_dict[F"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values
    logger.info('Converting model...' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('cpu' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(F"Unknown model name: {model_name}" )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1e-4 )
        print('Looks ok!' )
    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 50 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _lowercase ( unittest.TestCase ):
    def test_cpu( self ):
"""simple docstring"""
debug_launcher(test_script.main )
    def test_ops( self ):
"""simple docstring"""
debug_launcher(test_ops.main )
| 50 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_deberta_fast"""] = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_deberta"""] = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_deberta"""] = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
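# Illustrative only (not part of the original file): with the lazy module installed in
# sys.modules, submodules are imported on first attribute access, e.g.
#   from transformers.models.deberta import DebertaConfig, DebertaTokenizer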
| 140 | from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    '''simple docstring'''
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
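    # Illustrative extra check (not part of the original file): on the graphs above,
    # the shortest E -> F path is E -> G -> F with total weight 3.
    assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3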
| 140 | 1 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    """simple docstring"""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = 'A = ' + ' + '.join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = 'B = ' + ' + '.join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = 'A*B = ' + ' + '.join(
            f"""{coef}*x^{i}""" for i, coef in enumerate(self.product)
        )

        return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
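    # Illustrative usage (not part of the original file): multiply (1 + 2x) by (3 + 4x).
    # Coefficient lists are little-endian (index i holds the coefficient of x^i); the
    # result is [3, 10, 8] as complex numbers with zero imaginary part.
    print(FFT([1, 2], [3, 4]).product)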
| 356 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
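# Illustrative only (not part of the original file): with utils_summarization.py available
# in the same package, the suite can be run with, e.g.,
#   python -m pytest test_utils_summarization.py -q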
| 207 | 0 |
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
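    # Illustrative sanity check (not part of the original file): 111111 is the first
    # repunit divisible by 7, so least_divisible_repunit(7) == 6.
    assert least_divisible_repunit(7) == 6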
| 19 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username) -> None:
        '''simple docstring'''
        self.url = F'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        '''simple docstring'''
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self :Union[str, Any] ) -> str:
'''simple docstring'''
return F'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self :List[Any] ) -> str:
'''simple docstring'''
return F'{self.fullname} ({self.username}) is {self.biography}'
    @property
    def username(self) -> str:
        '''simple docstring'''
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        '''simple docstring'''
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        '''simple docstring'''
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        '''simple docstring'''
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        '''simple docstring'''
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        '''simple docstring'''
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        '''simple docstring'''
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        '''simple docstring'''
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        '''simple docstring'''
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        '''simple docstring'''
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        '''simple docstring'''
        return self.user_data["is_private"]
def test_instagram(username: str = "github") -> None:
    """simple docstring"""
    import os

    if os.environ.get('CI'):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 161 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1E10) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
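    # Illustrative check (not part of the original file): the generator yields the primes in order.
    from itertools import islice

    assert list(islice(sieve(), 5)) == [2, 3, 5, 7, 11]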
| 369 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    '''simple docstring'''
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2_889, -18.9_045, 11.7_281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5_028, -22.8_681, 45.6_475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]])
    else:
        expected_slice = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1E-4)

    print(F'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)

    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
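# A hypothetical invocation (argument values are illustrative; the checkpoint URL is the
# script's own default):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small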
| 313 | 0 |
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows) )
    return round(present_value, ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
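    # Illustrative check (not part of the original file): paying 100 now for 110 in one
    # year breaks even at a 10% discount rate, so the NPV rounds to 0.0.
    assert present_value(0.10, [-100, 110]) == 0.0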
| 26 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"""height""": 18, """width""": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_thumbnail"""))
        self.assertTrue(hasattr(image_processing, """do_align_long_axis"""))
        self.assertTrue(hasattr(image_processing, """do_pad"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""height""": 18, """width""": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"""height""": 42, """width""": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"""height""": 84, """width""": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ),
        )
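# Illustrative only (not part of the original file): run just this module with, e.g.,
#   python -m pytest tests/models/donut/test_image_processing_donut.py -q
# (the exact path is an assumption about the repository layout).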
| 26 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 69 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nllb_moe'] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 69 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.0_2, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
def A_ ( self : Union[str, Any] ) -> Any:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
lowerCamelCase__ : Union[str, Any] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__ : str = bbox[i, j, 3]
lowerCamelCase__ : Union[str, Any] = bbox[i, j, 1]
lowerCamelCase__ : Optional[int] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__ : Optional[int] = bbox[i, j, 2]
lowerCamelCase__ : Optional[int] = bbox[i, j, 0]
lowerCamelCase__ : Any = tmp_coordinate
lowerCamelCase__ : List[str] = tf.constant(UpperCAmelCase )
lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase__ : int = None
if self.use_token_type_ids:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCamelCase__ : Dict = None
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCamelCase__ : int = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def A_ ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any ) -> str:
lowerCamelCase__ : List[Any] = TFLayoutLMvaModel(config=UpperCAmelCase )
# text + image
lowerCamelCase__ : str = model(UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
lowerCamelCase__ : List[Any] = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , training=UpperCAmelCase , )
lowerCamelCase__ : Any = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase__ : Optional[int] = model(UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase__ : int = model({'pixel_values': pixel_values} , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def A_ ( self : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[Any] ) -> Optional[int]:
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Tuple = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str] ) -> str:
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : Any = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase )
lowerCamelCase__ : Optional[int] = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def A_ ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : List[str] = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase )
lowerCamelCase__ : Any = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : str ) -> Optional[Any]:
lowerCamelCase__ : Tuple = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : Union[str, Any] = config_and_inputs
lowerCamelCase__ : Dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def A_ ( self : Optional[int] ) -> Tuple:
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] = model_class(UpperCAmelCase )
if getattr(UpperCAmelCase , 'hf_compute_loss' , UpperCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
lowerCamelCase__ : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
lowerCamelCase__ : Any = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0]
]
lowerCamelCase__ : List[str] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowerCamelCase__ : str = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
lowerCamelCase__ : Tuple = prepared_for_class.pop('input_ids' )
lowerCamelCase__ : List[Any] = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
lowerCamelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
lowerCamelCase__ : Tuple = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
lowerCamelCase__ : Tuple = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
lowerCamelCase__ : Union[str, Any] = -100
lowerCamelCase__ : List[str] = tf.convert_to_tensor(UpperCAmelCase )
lowerCamelCase__ : Tuple = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
lowerCamelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
lowerCamelCase__ : Any = model(UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
lowerCamelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
# Get keys that were added with the _prepare_for_class function
lowerCamelCase__ : Dict = prepared_for_class.keys() - inputs_dict.keys()
lowerCamelCase__ : Tuple = inspect.signature(model.call ).parameters
lowerCamelCase__ : str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
lowerCamelCase__ : Optional[Any] = {0: 'input_ids'}
for label_key in label_keys:
lowerCamelCase__ : int = signature_names.index(UpperCAmelCase )
lowerCamelCase__ : int = label_key
lowerCamelCase__ : int = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
lowerCamelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
lowerCamelCase__ : int = prepared_for_class[value]
lowerCamelCase__ : Union[str, Any] = tuple(UpperCAmelCase )
# Send to model
lowerCamelCase__ : Tuple = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def A_ ( self : List[str] ) -> List[Any]:
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A_ ( self : List[Any] ) -> str:
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ : Tuple = type
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> Any:
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A_ ( self : str ) -> List[str]:
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A_ ( self : List[Any] ) -> str:
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@slow
def A_ ( self : List[Any] ) -> List[Any]:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[Any] = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]])

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
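# Illustrative only (not part of the original file): the @slow integration test above is
# skipped by default; transformers' convention is to opt in via an environment variable:
#   RUN_SLOW=1 python -m pytest tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py -q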
| 50 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    features: List[InputFeatures]

    def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False):
        processor = hans_processors[task]()

        cached_features_file = os.path.join(
            data_dir,
            'cached_{}_{}_{}_{}'.format(
                'dev' if evaluate else 'train',
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(F"""Loading features from cached file {cached_features_file}""")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(F"""Creating features from dataset file at {data_dir}""")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )

                logger.info('Training examples: %s', len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]

    def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc='convert examples to features'):
                if ex_index % 10000 == 0:
                    logger.info('Writing example %d of %d' % (ex_index, len(examples)))

                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    'example_id': tf.int64,
                    'input_ids': tf.int32,
                    'attention_mask': tf.int32,
                    'token_type_ids': tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    'example_id': tf.TensorShape([]),
                    'input_ids': tf.TensorShape([None, None]),
                    'attention_mask': tf.TensorShape([None, None]),
                    'token_type_ids': tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )

    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : int , UpperCAmelCase : List[Any] ) -> int:
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , 'heuristics_train_set.txt' ) ) , 'train' )
def A_ ( self : Any , UpperCAmelCase : int ) -> List[Any]:
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def A_ ( self : Any ) -> List[Any]:
return ["contradiction", "entailment", "neutral"]
def A_ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> List[str]:
lowerCamelCase__ : List[str] = []
for i, line in enumerate(UpperCAmelCase ):
if i == 0:
continue
lowerCamelCase__ : Tuple = '%s-%s' % (set_type, line[0])
lowerCamelCase__ : str = line[5]
lowerCamelCase__ : Dict = line[6]
lowerCamelCase__ : int = line[7][2:] if line[7].startswith('ex' ) else line[7]
lowerCamelCase__ : Dict = line[0]
examples.append(InputExample(guid=UpperCAmelCase , text_a=UpperCAmelCase , text_b=UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
return examples
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[int]:
lowerCamelCase__ : int = {label: i for i, label in enumerate(_UpperCAmelCase )}
lowerCamelCase__ : List[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(_UpperCAmelCase ) , desc='convert examples to features' ):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d' % (ex_index) )
lowerCamelCase__ : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , truncation=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , )
lowerCamelCase__ : List[str] = label_map[example.label] if example.label in label_map else 0
lowerCamelCase__ : Optional[int] = int(example.pairID )
features.append(InputFeatures(**_UpperCAmelCase , label=_UpperCAmelCase , pairID=_UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
_UpperCAmelCase : str = {
"""hans""": 3,
}
_UpperCAmelCase : List[Any] = {
"""hans""": HansProcessor,
}
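# --- Usage sketch (hedged, illustrative only): the checkpoint name and the
# data directory below are placeholders/assumptions, not part of this module.
# HansDataset also needs torch available and a local HANS download.
#
# from transformers import AutoTokenizer
#
# demo_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# demo_dataset = HansDataset(
#     data_dir="/path/to/hans",  # directory containing heuristics_*_set.txt
#     tokenizer=demo_tokenizer,
#     task="hans",
#     max_seq_length=128,
#     evaluate=False,
# )
# print(len(demo_dataset), demo_dataset.get_labels())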
| 50 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        """simple docstring"""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        """simple docstring"""
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        """simple docstring"""
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """simple docstring"""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """simple docstring"""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        """simple docstring"""
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        """simple docstring"""
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
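# --- Usage sketch (hedged): in practice the tokenizer is loaded from the Hub
# rather than from local vocab/spm paths; the text below is illustrative.
#
# tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# model_inputs = tok("Hello world", return_tensors="pt")
# print(model_inputs["input_ids"])  # starts with the __en__ language token id
# print(tok.get_lang_id("fr"))      # id used as forced BOS during generation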
 | 192 | import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}
if __name__ == "__main__":
_UpperCAmelCase = """hopper-medium-v2"""
_UpperCAmelCase = gym.make(env_name)
_UpperCAmelCase = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
_UpperCAmelCase = env.reset()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1000
_UpperCAmelCase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_UpperCAmelCase = pipeline(obs, planning_horizon=32)
# execute action in environment
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = env.step(denorm_actions)
_UpperCAmelCase = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
_UpperCAmelCase = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 192 | 1 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
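# --- Example invocation (hedged; the dataset/model pair mirrors the common
# keyword-spotting recipe for this script and is a placeholder, not output of
# this file) ---
#
# python run_audio_classification.py \
#     --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb \
#     --dataset_config_name ks \
#     --output_dir wav2vec2-base-ft-keyword-spotting \
#     --do_train --do_eval \
#     --learning_rate 3e-5 \
#     --max_length_seconds 1 \
#     --per_device_train_batch_size 32 \
#     --num_train_epochs 5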
| 227 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
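# --- Usage sketch (hedged): the residue indices below are made up; index 20 is
# the UNK row appended above, and only an "aatype" tensor is required.
if __name__ == "__main__":
    dummy_protein = {"aatype": torch.tensor([0, 1, 2, 20])}
    out = make_atom14_masks(dummy_protein)
    print(out["atom14_atom_exists"].shape)       # torch.Size([4, 14])
    print(out["residx_atom14_to_atom37"].shape)  # torch.Size([4, 14])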
| 207 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """simple docstring"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
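# --- Usage sketch (hedged): the checkpoint and image URL are illustrative
# assumptions; any OWL-ViT style zero-shot detection checkpoint should work.
#
# from transformers import pipeline
#
# detector = pipeline(
#     task="zero-shot-object-detection",
#     model="google/owlvit-base-patch32",
# )
# preds = detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote"],
# )
# print(preds[0])  # {"score": ..., "label": "cat", "box": {"xmin": ..., ...}}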
| 369 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    """simple docstring"""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    """simple docstring"""

    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
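# --- Round-trip sketch (hedged; the tiny in-memory dataset and file name are
# made up for illustration) ---
#
# from datasets import Dataset
#
# demo = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# ParquetDatasetWriter(demo, "demo.parquet").write()
# round_tripped = ParquetDatasetReader("demo.parquet").read()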
| 117 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    """simple docstring"""

    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 198 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
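# Hedged note: with the _LazyModule pattern above, the heavy torch-backed
# symbols are only imported on first attribute access, e.g.:
#
#     from transformers.models.wavlm import WavLMConfig  # cheap, config only
#     from transformers.models.wavlm import WavLMModel   # triggers torch import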
| 313 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__snake_case : Dict =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)

        requires_backends(self, '''vision''')
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        """simple docstring"""
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['''text_inputs'''] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        candidate_labels = model_inputs.pop('''candidate_labels''')
        text_inputs = model_inputs.pop('''text_inputs''')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """simple docstring"""
        candidate_labels = model_outputs.pop('''candidate_labels''')
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")

        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
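# --- Usage sketch (hedged): the checkpoint is a real CLIP model, but the
# image path and labels are illustrative placeholders.
#
# from transformers import pipeline
#
# classifier = pipeline(
#     task="zero-shot-image-classification", model="openai/clip-vit-base-patch32"
# )
# print(classifier("cats.png", candidate_labels=["cat", "plane", "remote"]))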
| 354 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def get_dummy_components(self):
        """simple docstring"""
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        """simple docstring"""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''')
    def test_save_load_float16(self):
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        """simple docstring"""
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 94 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
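    # Quick sanity check (added illustration): the sieve runs in O(n log log n);
    # for n = 25 it should produce exactly the primes below.
    assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]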
| 69 | """simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
__UpperCamelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
__UpperCamelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
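# Illustrative note (added): each call prints all n! orderings of its input,
# e.g. for ["A", "B", "C"] that is 3! = 6 lines starting with ['A', 'B', 'C'].
# The backtracking step restores index_used, so every branch of the state
# space tree starts from a clean slate.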
| 69 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None ):
"""simple docstring"""
super().__init__()
__UpperCAmelCase : List[str] = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__UpperCAmelCase : Optional[Any] = torch.zeros(UpperCAmelCase_ , UpperCAmelCase_ )
else:
__UpperCAmelCase : int = None
__UpperCAmelCase : Tuple = torch.nn.Parameter(UpperCAmelCase_ )
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
def __init__( self : str , UpperCAmelCase_ : VQModel , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : TransformeraDModel , UpperCAmelCase_ : VQDiffusionScheduler , UpperCAmelCase_ : LearnedClassifierFreeSamplingEmbeddings , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=UpperCAmelCase_ , transformer=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """simple docstring"""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """simple docstring"""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
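
# Hedged usage sketch (added; not part of the original module). It assumes the
# "microsoft/vq-diffusion-ithq" checkpoint on the Hugging Face Hub; kept under
# a __main__ guard so importing this file stays side-effect free.
if __name__ == "__main__":
    pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    output = pipe("teddy bear playing in the pool", num_inference_steps=50)
    output.images[0].save("teddy_bear.png")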
| 356 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
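
# Minimal usage sketch (added; not part of the original file): instantiate the
# default configuration and inspect a few fields. Building a model from it
# assumes the full transformers library, e.g. GLPNModel(configuration).
if __name__ == "__main__":
    configuration = GLPNConfig()
    print(configuration.model_type, configuration.hidden_sizes)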
| 309 |
"""simple docstring"""
def kth_permutation(k: int, n: int) -> list:
    """
    Return the k-th (0-indexed) lexicographic permutation of range(n), using
    the factorial number system.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    """
    # Factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod() | 191 | 0 |
from math import log2


def lowest_set_bit_index(number: int) -> int:
    """
    Return the zero-based index of the lowest set bit of `number`
    (0 is returned for an input of 0).

    >>> lowest_set_bit_index(12)
    2
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    # `number & -number` isolates the lowest set bit
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 278 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
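
# Hedged usage sketch (added; not part of the original file): it assumes the
# imported IIRFilter exposes a per-sample `process` method, as in the
# TheAlgorithms reference implementation this module builds on.
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    print([round(lowpass.process(sample), 4) for sample in (1.0, 0.0, 0.0, 0.0)])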
| 278 | 1 |
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """simple docstring"""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048) -> None:
        '''simple docstring'''
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 99 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    '''simple docstring'''
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""")
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}""")

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print('''add noise to latents at timestep''', timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
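
# Hedged usage sketch (added; not part of the original file). Assumes the
# "google/ddpm-ema-celebahq-256" checkpoint and a local 256x256 input image;
# both names are illustrative placeholders.
if __name__ == "__main__":
    init_image = PIL.Image.open("input.png")
    pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
    image, latent_timestep = pipe(init_image, strength=0.5, return_dict=False)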
| 117 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'

IMPORT_IN_FUNCTION = '\ndef foo():\n    import os\n    return False\n'

DEEPLY_NESTED_IMPORT = '\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n'

TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n'

TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n'

MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n'

EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n'

GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n'

MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n'

MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n'

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case', CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, 'test_file.py')
    with open(tmp_file_path, 'w') as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
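
# Hedged direct demonstration (added; not in the original test file):
# get_imports statically scans a source file and returns the top-level module
# names it imports. `_demo_get_imports` is a hypothetical helper.
def _demo_get_imports(tmp_path):
    demo_path = os.path.join(tmp_path, 'demo.py')
    with open(demo_path, 'w') as f:
        f.write('import os\nimport json\n')
    assert sorted(get_imports(demo_path)) == ["json", "os"]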
| 327 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 327 | 1 |
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
    """
    Split `string` on every occurrence of `separator`.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    """
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 46 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """simple docstring"""
    return "".join(sorted(word))


def anagram(my_word: str) -> list:
    """simple docstring"""
    return word_by_signature[signature(my_word)]
snake_case : str = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
snake_case : Optional[int] = sorted({word.strip().lower() for word in data.splitlines()})
snake_case : str = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
snake_case : Optional[int] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 94 | 0 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 354 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith('''http://''') or audio.startswith('''https://'''):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, '''rb''') as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError('''We expect a numpy ndarray as input''')
        if len(audio.shape) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''')

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''')
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('''candidate_labels''')
        text_inputs = model_inputs.pop('''text_inputs''')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('''candidate_labels''')
        logits = model_outputs['''logits'''][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''')

        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
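
# Hedged usage sketch (added; not part of the original module). Assumes the
# "laion/clap-htsat-unfused" checkpoint and a local "dog_bark.wav" file; both
# are illustrative placeholders.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    output = classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
    print(output)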
| 11 | 0 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 263 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''xlm-roberta-xl'''

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
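
# Minimal usage sketch (added; not part of the original file): build the
# default configuration and inspect a few of the fields set above.
if __name__ == "__main__":
    configuration = XLMRobertaXLConfig()
    print(configuration.model_type, configuration.hidden_size, configuration.num_hidden_layers)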
| 37 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
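
    # Illustrative note (added; not part of the original file): with the
    # _LazyModule installed in sys.modules, heavy torch submodules are only
    # imported on first attribute access, e.g.
    #
    #     from transformers.models.focalnet import FocalNetModel
    #
    # triggers the real import of .modeling_focalnet at that point.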
| 319 | 0 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 278 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 278 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    """simple docstring"""

    model_type = '''speech_to_text_2'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 359 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 127 | 0 |
def sum_of_series(first_term, common_diff, num_of_terms):
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
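    # Illustrative cross-check (added; not in the original file): the closed
    # form agrees with a direct summation of the first ten natural numbers.
    assert sum_of_series(1, 1, 10) == float(sum(range(1, 11)))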
| 327 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return resize(
            image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')

        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
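
# Hedged usage sketch (added; not part of the original file): run the
# processor on a dummy array. The class name above follows the DeiT-style
# conventions this file's defaults (256 resize, 224 crop) suggest.
if __name__ == "__main__":
    dummy = np.zeros((300, 300, 3), dtype=np.uint8)
    processor = DeiTImageProcessor()
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)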
| 327 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class Data2VecVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'data2vec-vision'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4
| 361 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    '''simple docstring'''

    def process(self, sample: float) -> float:
        '''simple docstring'''
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """simple docstring"""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("""Frequency (Hz)""")
    plt.xscale("""log""")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("""Gain (dB)""")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("""Frequency (Hz)""")
    plt.xscale("""log""")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("""Phase shift (Radians)""")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
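
# Hedged usage sketch (added; not part of the original file): any object with
# a `process` method satisfies the FilterType protocol. An identity "filter"
# should plot a flat 0 dB magnitude response.
if __name__ == "__main__":

    class IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(IdentityFilter(), 48_000)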
| 136 | 0 |
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    '''simple docstring'''
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
| 57 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
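
# Note: these tests exercise the PyTorch benchmarking utilities end to end.
# Assuming the usual transformers checkout layout, they can be run with e.g.:
#   python -m pytest tests/benchmark/test_benchmark.py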
| 11 | 0 |
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst
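
# Example: gnome_sort([5, 3, 1, 4]) returns [1, 3, 4, 5]. The index steps back
# after every swap, so already-sorted input is handled in a single forward pass.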
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 370 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
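
# The integration check above compares a 3x3 slice of the last hidden state
# (shape (1, 11, 768) for this input) against reference values at atol=1e-4,
# rather than comparing the full tensor.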
| 60 | 0 |
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # one copy of the digit list per position, then drop a different digit from each
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )
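
# Example: remove_digit(152) == 52, since dropping "1", "5" or "2" yields
# 52, 12 and 15 respectively, and 52 is the largest.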
if __name__ == "__main__":
    __import__("doctest").testmod()
| 310 |
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
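
# As the cases above exercise, `get_imports` is expected to report only the
# unguarded top-level dependency ("os") and to skip imports that appear inside
# try/except blocks ("bar", "baz").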
| 319 | 0 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : Optional[int] = data
lowerCAmelCase_ : Tuple = [0X67_45_23_01, 0XEF_CD_AB_89, 0X98_BA_DC_FE, 0X10_32_54_76, 0XC3_D2_E1_F0]
@staticmethod
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any ):
return ((n << b) | (n >> (3_2 - b))) & 0XFF_FF_FF_FF
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : List[Any] = b'\x80' + b'\x00' * (6_3 - (len(self.data ) + 8) % 6_4)
lowerCAmelCase_ : List[str] = self.data + padding + struct.pack('>Q' , 8 * len(self.data ) )
return padded_data
def SCREAMING_SNAKE_CASE__ ( self : int ):
return [
self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 )
]
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase_ : Optional[Any] = list(struct.unpack('>16L' , SCREAMING_SNAKE_CASE_ ) ) + [0] * 6_4
for i in range(1_6 , 8_0 ):
lowerCAmelCase_ : int = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 )
return w
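
    # The message schedule above expands 16 big-endian 32-bit words into 80
    # words; e.g. w[16] = rotate(w[13] ^ w[8] ^ w[2] ^ w[0], 1).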
    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # choose the round function f and constant k for this round
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            # fold this block's result into the running state
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
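
# Sanity check against a well-known SHA-1 test vector:
# SHA1Hash(b"abc").final_hash() == hashlib.sha1(b"abc").hexdigest()
#                               == "a9993e364706816aba3e25717850c26c9cd0d89d"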
| 289 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
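
# The tester above follows the usual transformers pattern: it builds a tiny
# random config and batch, and each `create_and_check_*` method instantiates
# one head class and asserts only on output shapes, never on values.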
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
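
# Tests decorated with @slow only run when the RUN_SLOW environment variable is
# set, e.g.: RUN_SLOW=1 python -m pytest tests/models/albert/test_modeling_albert.py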
| 289 | 1 |