Dataset schema (reconstructed from the flattened viewer header; each row below is streamed in column order: code, code_codestyle, style_context, style_context_codestyle, label):

column | type | range
---|---|---
code | string | length 86 to 54.5k
code_codestyle | int64 | 0 to 371
style_context | string | length 87 to 49.2k
style_context_codestyle | int64 | 0 to 349
label | int64 | 0 to 1
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __UpperCamelCase :
A_ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
A_ = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
A_ = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
A_ = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
A_ = field(default=2 , metadata={"help": "Batch size for training."} )
A_ = field(default=2 , metadata={"help": "Batch size for evaluation."} )
A_ = field(default=0.1 , metadata={"help": "Value of weight decay."} )
A_ = field(
default=10000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
A_ = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
A_ = field(default="cosine" , metadata={"help": "Learning rate."} )
A_ = field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
A_ = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
A_ = field(default=50000 , metadata={"help": "Maximum number of training steps."} )
A_ = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
A_ = field(default=1024 , metadata={"help": "Sequence length used for training."} )
A_ = field(default=1 , metadata={"help": "Training seed."} )
A_ = field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
A_ = field(default=lowerCAmelCase_ , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class __UpperCamelCase :
A_ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
A_ = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
A_ = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
A_ = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
A_ = field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
A_ = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class __UpperCamelCase :
A_ = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
A_ = field(default=lowerCAmelCase_ , metadata={"help": "Number of workers used for code evaluation."} )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "Sample from the language model's output distribution."} )
A_ = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
A_ = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
A_ = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
A_ = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
A_ = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
A_ = field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
A_ = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
A_ = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
A_ = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
A_ = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __UpperCamelCase :
A_ = field(
default=lowerCAmelCase_ , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
A_ = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
A_ = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
A_ = field(
default=100000 , metadata={"help": "Number of files to save per JSON output file."} )
A_ = field(default="content" , metadata={"help": "Column containing text data to process."} )
A_ = field(
default=1000 , metadata={"help": "Maximum line length in a file; longer files are filtered out."} )
A_ = field(
default=100 , metadata={"help": "Maximum mean line length in a file; otherwise the file is filtered out."} )
A_ = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters; otherwise the file is filtered out."} )
A_ = field(
default=1.5 , metadata={"help": "Minimum character-to-token ratio for the file; otherwise the file is filtered out."} )
A_ = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
A_ = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "If True, near-duplicate samples are removed."} )
A_ = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class __UpperCamelCase :
A_ = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
A_ = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
A_ = field(default="content" , metadata={"help": "Column containing text data to process."} )
A_ = field(default=200000 , metadata={"help": "Number of examples to train tokenizer on."} )
A_ = field(
default=32768 , metadata={"help": "Size of the vocabulary of the new tokenizer."} )
A_ = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
A_ = field(default=lowerCAmelCase_ , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class __UpperCamelCase :
A_ = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
A_ = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
A_ = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
A_ = field(default=lowerCAmelCase_ , metadata={"help": "Number of workers used for pretokenization."} )
@dataclass
class __UpperCamelCase :
A_ = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
A_ = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
A_ = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
A_ = field(default=lowerCAmelCase_ , metadata={"help": "Push saved model to the hub."} )
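A minimal sketch of how argument groups like the dataclasses above are typically consumed, using the standard HfArgumentParser API from transformers. The TrainingArguments name and fields below are illustrative stand-ins, not the obfuscated classes from this sample.

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class TrainingArguments:  # illustrative stand-in, not the sample's class
    model_ckpt: str = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path."})
    train_batch_size: int = field(default=2, metadata={"help": "Batch size for training."})
    learning_rate: float = field(default=2e-4, metadata={"help": "Learning rate for training."})

# each field becomes a --flag; pass an explicit argv list for a self-contained demo
parser = HfArgumentParser(TrainingArguments)
(args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4"])
print(args.model_ckpt, args.learning_rate)  # codeparrot/codeparrot 0.0001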
(code_codestyle: 27)
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Optional[Any] = 10
def _a (self ):
A_ : Dict = [1, 2, 3, 4]
A_ : List[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
A_ : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
A_ : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : List[str] = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
A_, A_ : Dict = process_story(lowercase )
self.assertEqual(lowercase , [] )
def _a (self ):
A_ : Optional[int] = """"""
A_, A_ : List[str] = process_story(lowercase )
self.assertEqual(lowercase , [] )
self.assertEqual(lowercase , [] )
def _a (self ):
A_ : Optional[Any] = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
A_, A_ : int = process_story(lowercase )
A_ : Optional[Any] = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(lowercase , lowercase )
A_ : Dict = ["""It was the best of times."""]
self.assertEqual(lowercase , lowercase )
def _a (self ):
A_ : Optional[int] = torch.tensor([1, 2, 3, 4] )
A_ : Dict = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase , 0 ).numpy() , expected.numpy() )
def _a (self ):
A_ : str = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
A_ : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 23 ).numpy() , expected.numpy() )
def _a (self ):
A_ : Any = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A_ : List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 1 ).numpy() , expected.numpy() )
def _a (self ):
A_ : List[Any] = 101
A_ : List[Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
A_ : List[str] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A_ : Dict = compute_token_type_ids(lowercase , lowercase )
np.testing.assert_array_equal(lowercase , lowercase )

(style_context_codestyle: 206, label: 0)
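A plausible implementation of the truncate_or_pad helper exercised by the tests above, inferred purely from the expected inputs and outputs (the real utils_summarization code may differ):

def truncate_or_pad(sequence, block_size, pad_token_id):
    # truncate sequences longer than block_size; right-pad shorter ones with the pad token
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))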
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = '''big_bird'''
def __init__( self :Union[str, Any] , __magic_name__ :Any=5_0358 , __magic_name__ :List[str]=768 , __magic_name__ :List[str]=12 , __magic_name__ :Optional[int]=12 , __magic_name__ :List[str]=3072 , __magic_name__ :Dict="gelu_new" , __magic_name__ :List[Any]=0.1 , __magic_name__ :Any=0.1 , __magic_name__ :Optional[int]=4096 , __magic_name__ :Union[str, Any]=2 , __magic_name__ :int=0.02 , __magic_name__ :int=1E-1_2 , __magic_name__ :Any=True , __magic_name__ :Any=0 , __magic_name__ :Union[str, Any]=1 , __magic_name__ :Any=2 , __magic_name__ :Dict=66 , __magic_name__ :int="block_sparse" , __magic_name__ :List[Any]=True , __magic_name__ :Dict=False , __magic_name__ :Tuple=64 , __magic_name__ :Dict=3 , __magic_name__ :Union[str, Any]=None , **__magic_name__ :Optional[Any] , ):
'''simple docstring'''
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , sep_token_id=__magic_name__ , **__magic_name__ , )
a = vocab_size
a = max_position_embeddings
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = type_vocab_size
a = layer_norm_eps
a = use_cache
a = rescale_embeddings
a = attention_type
a = use_bias
a = block_size
a = num_random_blocks
a = classifier_dropout
class __lowerCAmelCase ( __magic_name__ ):
@property
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
a = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
a = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
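For reference, the config class above mirrors the public BigBirdConfig API in transformers; a short usage sketch (real API, argument names taken from the __init__ above):

from transformers import BigBirdConfig

config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
print(config.model_type)  # big_bird
config.attention_type = "original_full"  # fall back to full attention, e.g. for short sequences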
(code_codestyle: 347)
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self :List[str] , __magic_name__ :List[str] , __magic_name__ :List[Any]=13 , __magic_name__ :Any=7 , __magic_name__ :Optional[int]=True , __magic_name__ :List[Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Union[str, Any]=True , __magic_name__ :Any=99 , __magic_name__ :List[str]=32 , __magic_name__ :List[str]=5 , __magic_name__ :str=4 , __magic_name__ :str=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :int=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Tuple=16 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=0.02 , __magic_name__ :Any=4 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_attention_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = True
UpperCamelCase__ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = FlaxRoFormerModelTester(self )
@slow
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
a = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__magic_name__ )
a = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
a = jnp.array([[0, 1, 2, 3, 4, 5]] )
a = model(__magic_name__ )[0]
a = 5_0000
a = (1, 6, vocab_size)
self.assertEqual(output.shape , __magic_name__ )
a = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )
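A standalone version of the masked-LM integration check above (real flax/transformers API; the checkpoint id comes from the test itself and downloading it requires network access):

import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
print(logits.shape)  # (1, 6, 50000), the shape asserted above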
(style_context_codestyle: 347, label: 1)
'''simple docstring'''
import enum
import shutil
import sys
UpperCamelCase__ , UpperCamelCase__: Tuple = shutil.get_terminal_size()
UpperCamelCase__: int = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class SCREAMING_SNAKE_CASE( enum.Enum ):
"""simple docstring"""
lowerCamelCase__ = 0
lowerCamelCase__ = 1
def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int]="" ) -> Tuple:
sys.stdout.write(str(_lowerCAmelCase ) + end )
sys.stdout.flush()
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any="" ) -> List[str]:
forceWrite(f"""\u001b[{color}m{content}\u001b[0m""" , _lowerCAmelCase )
def snake_case_ ( ) -> Any:
forceWrite('''\r''' )
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : str ) -> Dict:
forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def snake_case_ ( ) -> List[str]:
forceWrite(''' ''' * TERMINAL_WIDTH )
reset_cursor()
def snake_case_ ( ) -> int:
reset_cursor()
forceWrite('''-''' * TERMINAL_WIDTH )
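Because every helper above is obfuscated to the same name (each def is snake_case_, while the call sites keep names like forceWrite and reset_cursor), here is a self-contained, readably named sketch of the same ANSI-cursor helpers; the behavior is inferred from the function bodies.

import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}

def force_write(content, end=""):
    # write without a trailing newline and flush immediately
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()

def write_color(content, color, end=""):
    # wrap content in ANSI color escape codes
    force_write(f"\u001b[{color}m{content}\u001b[0m", end)

def move_cursor(num_lines, direction):
    # emit an ANSI cursor-movement sequence (UP/DOWN/RIGHT/LEFT)
    force_write(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")

write_color("done", 32, end="\n")  # 32 = ANSI green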
(code_codestyle: 23)
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
def A ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase : Tuple = [mem.copy() for i in range(6 )]
UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase : Dict = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Any = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Union[str, Any] = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Optional[Any] = Text('''CPU''' , font_size=24 )
UpperCAmelCase : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : List[str] = Text('''GPU''' , font_size=24 )
UpperCAmelCase : Dict = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
UpperCAmelCase : int = [mem.copy() for i in range(6 )]
UpperCAmelCase : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : List[str] = Text('''Model''' , font_size=24 )
UpperCAmelCase : Tuple = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
UpperCAmelCase : Any = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
cpu_targs.append(__snake_case )
UpperCAmelCase : int = [mem.copy() for i in range(6 )]
UpperCAmelCase : int = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Any = Text('''Loaded Checkpoint''' , font_size=24 )
UpperCAmelCase : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , aligned_edge=__snake_case , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase : str = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
UpperCAmelCase : Tuple = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase : List[Any] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) , Write(__snake_case ) )
self.play(Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
UpperCAmelCase : Tuple = []
UpperCAmelCase : int = []
for i, rect in enumerate(__snake_case ):
UpperCAmelCase : Any = fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
first_animations.append(GrowFromCenter(__snake_case , run_time=1 ) )
UpperCAmelCase : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(*__snake_case )
self.wait()
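Rendering note: a Scene subclass like the one above is typically rendered with manim's CLI, e.g. `manim -pql this_file.py <SceneClassName>` (-p previews the result, -ql renders at low quality); the file and class names here are placeholders, since the sample's names are obfuscated.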
(style_context_codestyle: 23, label: 1)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( _A ):
a : Tuple = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
a : Dict = 192
a : Optional[Any] = 768
a : Optional[int] = 12
a : Any = 3
a : List[Any] = [800, 1333]
a : Tuple = False
elif yolos_name == "yolos_s_dWr":
a : Dict = 330
a : Any = 14
a : Tuple = 6
a : Any = 1320
elif "yolos_s" in yolos_name:
a : Any = 384
a : int = 1536
a : List[Any] = 12
a : Tuple = 6
elif "yolos_b" in yolos_name:
a : Tuple = [800, 1344]
a : Dict = 91
a : Any = 'huggingface/label-files'
a : List[str] = 'coco-detection-id2label.json'
a : Union[str, Any] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) )
a : List[str] = {int(_A ): v for k, v in idalabel.items()}
a : Tuple = idalabel
a : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( _A , _A , _A = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a : List[str] = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
a : int = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
a : List[Any] = in_proj_weight[: config.hidden_size, :]
a : Dict = in_proj_bias[: config.hidden_size]
a : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a : Optional[int] = in_proj_weight[-config.hidden_size :, :]
a : str = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( _A ):
if "backbone" in name:
a : Tuple = name.replace('backbone' , 'vit' )
if "cls_token" in name:
a : List[str] = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
a : List[str] = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
a : Dict = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
a : Union[str, Any] = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
a : Optional[int] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
a : Dict = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
a : Dict = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Any = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Dict = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : int = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : List[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Dict = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
a : List[str] = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
a : List[Any] = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
a : List[str] = name.replace('vit.norm' , 'vit.layernorm' )
return name
def lowerCamelCase__ ( _A , _A ):
for key in orig_state_dict.copy().keys():
a : Dict = orig_state_dict.pop(_A )
if "qkv" in key:
a : Tuple = key.split('.' )
a : Any = int(key_split[2] )
a : Union[str, Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
a : List[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : int = val[-dim:, :]
else:
a : Any = val[:dim]
a : Optional[int] = val[dim : dim * 2]
a : Dict = val[-dim:]
else:
a : Any = val
return orig_state_dict
def lowerCamelCase__ ( ):
a : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : List[Any] = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( _A , _A , _A , _A = False ):
a : int = get_yolos_config(_A )
# load original state_dict
a : Dict = torch.load(_A , map_location='cpu' )['model']
# load 🤗 model
a : str = YolosForObjectDetection(_A )
model.eval()
a : Any = convert_state_dict(_A , _A )
model.load_state_dict(_A )
# Check outputs on an image, prepared by YolosImageProcessor
a : Dict = 800 if yolos_name != 'yolos_ti' else 512
a : str = YolosImageProcessor(format='coco_detection' , size=_A )
a : List[str] = image_processor(images=prepare_img() , return_tensors='pt' )
a : List[Any] = model(**_A )
a , a : List[Any] = outputs.logits, outputs.pred_boxes
a , a : List[str] = None, None
if yolos_name == "yolos_ti":
a : List[str] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
a : Optional[int] = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
a : List[str] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
a : Tuple = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
a : Tuple = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
a : str = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
a : str = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
a : Dict = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
a : int = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
a : str = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , _A , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , _A , atol=1E-4 )
Path(_A ).mkdir(exist_ok=_A )
print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_A )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_A )
if push_to_hub:
a : List[Any] = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
a : List[str] = model_mapping[yolos_name]
image_processor.push_to_hub(_A , organization='hustvl' )
model.push_to_hub(_A , organization='hustvl' )
if __name__ == "__main__":
lowerCAmelCase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase: Dict = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)

(code_codestyle: 96)
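Example invocation of the conversion script above, assembled only from the argparse flags it defines (the script file name is a placeholder):

python convert_yolos_to_pytorch.py \
    --yolos_name yolos_s_200_pre \
    --checkpoint_path /path/to/original_checkpoint.pth \
    --pytorch_dump_folder_path ./yolos-small \
    --push_to_hub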
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a__:
def __init__( self : Optional[int] ):
a : int = ''
a : List[str] = ''
a : int = []
a : Optional[Any] = 0
a : Optional[Any] = 2_56
a : int = 0
a : Optional[int] = 0
a : str = 0
a : int = 0
def lowercase_ ( self : List[str] , __snake_case : int ):
a : Optional[Any] = cva.imread(__snake_case , 0 )
a : int = copy.deepcopy(self.img )
a , a , a : Optional[int] = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' )
a : str = np.sum(__snake_case )
for i in range(len(__snake_case ) ):
a : List[str] = x[i] / self.k
self.sk += prk
a : List[Any] = (self.L - 1) * self.sk
if self.rem != 0:
a : Union[str, Any] = int(last % last )
a : int = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__snake_case )
a : int = int(np.ma.count(self.img ) / self.img[1].size )
a : Dict = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
a : Tuple = self.img[j][i]
if num != self.last_list[num]:
a : Union[str, Any] = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def lowercase_ ( self : Union[str, Any] ):
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def lowercase_ ( self : Any ):
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCAmelCase: Dict = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
lowerCAmelCase: Optional[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()

(style_context_codestyle: 96, label: 1)
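The stretch method above is standard histogram equalization: for gray level r_k it computes s_k = (L - 1) * sum_{j <= k} p(r_j) with L = 256 levels. A compact NumPy equivalent, as a sketch (it does not reproduce the class's exact remainder rounding):

import numpy as np

def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=levels)  # per-level pixel counts
    cdf = hist.cumsum() / img.size                     # cumulative distribution of gray levels
    lut = np.round((levels - 1) * cdf).astype(img.dtype)
    return lut[img]                                    # remap every pixel through the lookup table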
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE :Any = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Union[str, Any] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :List[Any] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Any = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
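With the _LazyModule pattern above, importing transformers stays cheap: these symbols are resolved only on first attribute access. Downstream code simply imports them directly (real public API, names taken from the import structure above):

from transformers import BlipProcessor, BlipForConditionalGeneration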
(code_codestyle: 159)
"""simple docstring"""
from math import isclose, sqrt
def lowercase (SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ) -> tuple[float, float, float]:
SCREAMING_SNAKE_CASE = point_y / 4 / point_x
SCREAMING_SNAKE_CASE = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
SCREAMING_SNAKE_CASE = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
SCREAMING_SNAKE_CASE = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
SCREAMING_SNAKE_CASE = outgoing_gradient**2 + 4
SCREAMING_SNAKE_CASE = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
SCREAMING_SNAKE_CASE = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
SCREAMING_SNAKE_CASE = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
SCREAMING_SNAKE_CASE = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
SCREAMING_SNAKE_CASE = x_minus if isclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else x_plus
SCREAMING_SNAKE_CASE = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def lowercase (SCREAMING_SNAKE_CASE_ : float = 1.4 , SCREAMING_SNAKE_CASE_ : float = -9.6 ) -> int:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = first_x_coord
SCREAMING_SNAKE_CASE = first_y_coord
SCREAMING_SNAKE_CASE = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next_point(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
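A short check of the gradient algebra used above: the ellipse is 4x^2 + y^2 = 100, so implicit differentiation gives 8x + 2y*y' = 0, i.e. the tangent gradient is -4x/y and the normal gradient is y/(4x), which is exactly the normal_gradient computed in next_point. With m = tan(theta) for the normal's angle theta, the double-angle identities sin(2*theta) = 2m/(1 + m^2) and cos(2*theta) = (1 - m^2)/(1 + m^2) give the two intermediate factors, and the reflected ray's gradient is tan(2*theta - alpha) = (sin 2*theta - cos 2*theta * tan alpha)/(cos 2*theta + sin 2*theta * tan alpha), with tan alpha the incoming gradient; this is the exact expression in the code.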
(style_context_codestyle: 113, label: 0)
"""simple docstring"""
import numpy as np
A : Optional[Any] = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = np.array(__a )
def snake_case ( self , __a ):
__lowerCAmelCase , __lowerCAmelCase = np.where(letter == self.SQUARE )
__lowerCAmelCase = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def snake_case ( self , __a , __a ):
__lowerCAmelCase = self.SQUARE[indexa - 1, indexa - 1]
return letter
def snake_case ( self , __a ):
__lowerCAmelCase = message.lower()
__lowerCAmelCase = message.replace(" " , "" )
__lowerCAmelCase = message.replace("j" , "i" )
__lowerCAmelCase = np.empty((2, len(__a )) )
for letter_index in range(len(__a ) ):
__lowerCAmelCase = self.letter_to_numbers(message[letter_index] )
__lowerCAmelCase = numbers[0]
__lowerCAmelCase = numbers[1]
__lowerCAmelCase = first_step.reshape(2 * len(__a ) )
__lowerCAmelCase = ""
for numbers_index in range(len(__a ) ):
__lowerCAmelCase = int(second_step[numbers_index * 2] )
__lowerCAmelCase = int(second_step[(numbers_index * 2) + 1] )
__lowerCAmelCase = self.numbers_to_letter(__a , __a )
__lowerCAmelCase = encoded_message + letter
return encoded_message
def snake_case ( self , __a ):
__lowerCAmelCase = message.lower()
message.replace(" " , "" )
__lowerCAmelCase = np.empty(2 * len(__a ) )
for letter_index in range(len(__a ) ):
__lowerCAmelCase = self.letter_to_numbers(message[letter_index] )
__lowerCAmelCase = numbers[0]
__lowerCAmelCase = numbers[1]
__lowerCAmelCase = first_step.reshape((2, len(__a )) )
__lowerCAmelCase = ""
for numbers_index in range(len(__a ) ):
__lowerCAmelCase = int(second_step[0, numbers_index] )
__lowerCAmelCase = int(second_step[1, numbers_index] )
__lowerCAmelCase = self.numbers_to_letter(__a , __a )
__lowerCAmelCase = decoded_message + letter
return decoded_message
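The class above implements the bifid cipher over a 5x5 Polybius square (j folded into i). Since the method names are obfuscated, here is a readably named, self-contained sketch of the encode step, matching the reshape trick used above:

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
COORDS = {ch: (r + 1, c + 1) for r, row in enumerate(SQUARE) for c, ch in enumerate(row)}

def encode(message: str) -> str:
    msg = message.lower().replace(" ", "").replace("j", "i")
    rows, cols = zip(*(COORDS[ch] for ch in msg))
    mixed = list(rows) + list(cols)  # bifid step: all row indices first, then all column indices
    return "".join(SQUARE[mixed[2 * i] - 1][mixed[2 * i + 1] - 1] for i in range(len(msg)))

print(encode("testmessage"))  # qtltbdxrxlk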
(code_codestyle: 371)
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
A : Optional[int] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a=16 , __a=13 , __a=7 , __a=14 , __a=10 , __a=19 , __a=5 , __a=4 , __a=True , __a=16 , __a=2 , __a=4 , __a=4 , __a="gelu" , __a=0.1 , __a=0.1 , __a=[1, 2, 3, 4, 5] , __a=25 , __a=5 , ):
__lowerCAmelCase = d_model
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length
__lowerCAmelCase = cardinality
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = embedding_dimension
__lowerCAmelCase = is_training
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = context_length
__lowerCAmelCase = prediction_length + label_length
__lowerCAmelCase = label_length
__lowerCAmelCase = moving_average
__lowerCAmelCase = autocorrelation_factor
def snake_case ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def snake_case ( self , __a ):
__lowerCAmelCase = config.context_length + max(config.lags_sequence )
__lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
__lowerCAmelCase = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def snake_case ( self ):
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case ( self , __a , __a ):
__lowerCAmelCase = AutoformerModel(config=__a ).to(__a ).eval()
__lowerCAmelCase = model(**__a )
__lowerCAmelCase = outputs.encoder_last_hidden_state
__lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(__a )
__lowerCAmelCase = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.create_network_inputs(**__a )
__lowerCAmelCase , __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowerCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowerCAmelCase = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__lowerCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowerCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowerCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowerCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(__a )
__lowerCAmelCase = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowerCAmelCase = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =(AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__UpperCAmelCase : List[Any] =(AutoformerForPrediction,) if is_torch_available() else ()
__UpperCAmelCase : Tuple ={"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
__UpperCAmelCase : Tuple =False
__UpperCAmelCase : Any =False
__UpperCAmelCase : Dict =False
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Optional[Any] =False
def snake_case ( self ):
__lowerCAmelCase = AutoformerModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__a , has_text_modality=__a )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowerCAmelCase , __lowerCAmelCase = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["missing_keys"] , [] )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="Model has no tokens embeddings" )
def snake_case ( self ):
pass
def snake_case ( self ):
__lowerCAmelCase = inspect.signature(getattr(__a , "forward" ) )
# The main input is the name of the argument after `self`
__lowerCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , "seq_length" , __a )
__lowerCAmelCase = getattr(self.model_tester , "decoder_seq_length" , __a )
__lowerCAmelCase = getattr(self.model_tester , "encoder_seq_length" , __a )
__lowerCAmelCase = getattr(self.model_tester , "d_model" , __a )
__lowerCAmelCase = getattr(self.model_tester , "num_attention_heads" , __a )
__lowerCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
__lowerCAmelCase = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowerCAmelCase = len(__a )
__lowerCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowerCAmelCase = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowerCAmelCase = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def snake_case ( self ):
super().test_retain_grad_hidden_states_attentions()
def _lowerCamelCase ( _UpperCamelCase="train-batch.pt" ):
'''simple docstring'''
__lowerCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=_UpperCamelCase , repo_type="dataset" )
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location=_UpperCamelCase )
return batch
@require_torch
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowerCAmelCase = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def snake_case ( self ):
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
__lowerCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowerCAmelCase = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def snake_case ( self ):
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
__lowerCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowerCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=__a )
__lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1e-1 ) )
| 259 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __magic_name__ :
def __init__( self , __snake_case , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case="resnet50" , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=True , __snake_case=True , ) -> Dict:
'''simple docstring'''
__a =parent
__a =out_indices if out_indices is not None else [4]
__a =stage_names
__a =out_features
__a =backbone
__a =batch_size
__a =image_size
__a =num_channels
__a =use_pretrained_backbone
__a =is_training
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a =self.get_config()
return config, pixel_values
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __magic_name__ ( self , __snake_case , __snake_case ) -> Optional[int]:
'''simple docstring'''
__a =TimmBackbone(config=__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
__a =model(__snake_case )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.prepare_config_and_inputs()
__a , __a =config_and_inputs
__a ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =TimmBackboneModelTester(self )
__a =ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
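        # Illustration (assumed stage names for a ResNet backbone): with stage_names like
        # ["stem", "stage1", "stage2", "stage3", "stage4"], out_indices=(-1,) and
        # out_indices=[len(stage_names) - 1] both select "stage4", so the two defaults
        # compared above expose the same final feature map.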
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models don't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models don't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 218 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if n uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Return the largest 1-9 pandigital number expressible as a concatenated product."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
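# Why these multipliers: for a 4-digit n >= 5000, concatenating n with 2*n gives
# n * 10**5 + 2*n = 100002 * n (exactly 9 digits); for a 3-digit n <= 333,
# concatenating n, 2*n and 3*n gives n * 10**6 + 2*n * 10**3 + 3*n = 1002003 * n.
# Searching base_num downward therefore returns the largest candidate first.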
if __name__ == "__main__":
print(f'''{solution() = }''')
| 218 | 1 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
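# f(x) = x**3 - 2*x - 5 has a single real root near x ~= 2.0946, so the call
# below should converge to approximately that value.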
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 361 | import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
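# Migration sketch: code that called CLIPFeatureExtractor.from_pretrained(name)
# can switch to CLIPImageProcessor.from_pretrained(name); the call signature is
# otherwise unchanged, since the class above only forwards to its parent.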
| 81 | 0 |
from collections import deque
from math import floor
from random import random
from time import time
class __A:
"""simple docstring"""
def __init__(self ):
UpperCamelCase__ = {}
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 ):
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCamelCase__ = [[w, v]]
if not self.graph.get(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = []
def UpperCAmelCase_ (self ):
return list(self.graph )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ):
if s == d:
return []
UpperCamelCase__ = []
UpperCamelCase__ = []
if s == -2:
UpperCamelCase__ = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase__ = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase__ = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return visited
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-1 ):
if c == -1:
UpperCamelCase__ = floor(random() * 1_00_00 ) + 10
for i in range(SCREAMING_SNAKE_CASE_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
UpperCamelCase__ = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-2 ):
UpperCamelCase__ = deque()
UpperCamelCase__ = []
if s == -2:
UpperCamelCase__ = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
while d:
UpperCamelCase__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
return len(self.graph[u] )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-2 ):
UpperCamelCase__ = []
UpperCamelCase__ = []
if s == -2:
UpperCamelCase__ = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = s
UpperCamelCase__ = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase__ = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase__ = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return sorted_nodes
def UpperCAmelCase_ (self ):
UpperCamelCase__ = []
UpperCamelCase__ = []
UpperCamelCase__ = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -2
UpperCamelCase__ = []
UpperCamelCase__ = s
UpperCamelCase__ = False
UpperCamelCase__ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase__ = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase__ = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = s
UpperCamelCase__ = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return list(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = []
UpperCamelCase__ = []
UpperCamelCase__ = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -2
UpperCamelCase__ = []
UpperCamelCase__ = s
UpperCamelCase__ = False
UpperCamelCase__ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase__ = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase__ = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = s
UpperCamelCase__ = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return False
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ):
UpperCamelCase__ = time()
self.dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = time()
return end - begin
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-2 ):
UpperCamelCase__ = time()
self.bfs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = time()
return end - begin
class __A:
"""simple docstring"""
def __init__(self ):
UpperCamelCase__ = {}
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 ):
        # check if u already exists
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCamelCase__ = [[w, v]]
# add the other way
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
UpperCamelCase__ = [[w, u]]
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(SCREAMING_SNAKE_CASE_ )
# the other way round
if self.graph.get(SCREAMING_SNAKE_CASE_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ):
if s == d:
return []
UpperCamelCase__ = []
UpperCamelCase__ = []
if s == -2:
UpperCamelCase__ = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase__ = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase__ = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return visited
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-1 ):
if c == -1:
UpperCamelCase__ = floor(random() * 1_00_00 ) + 10
for i in range(SCREAMING_SNAKE_CASE_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
UpperCamelCase__ = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-2 ):
UpperCamelCase__ = deque()
UpperCamelCase__ = []
if s == -2:
UpperCamelCase__ = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
while d:
UpperCamelCase__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
return len(self.graph[u] )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = []
UpperCamelCase__ = []
UpperCamelCase__ = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -2
UpperCamelCase__ = []
UpperCamelCase__ = s
UpperCamelCase__ = False
UpperCamelCase__ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase__ = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase__ = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = s
UpperCamelCase__ = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return list(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = []
UpperCamelCase__ = []
UpperCamelCase__ = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE_ )
visited.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -2
UpperCamelCase__ = []
UpperCamelCase__ = s
UpperCamelCase__ = False
UpperCamelCase__ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ = True
if len(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCamelCase__ = stack[len(SCREAMING_SNAKE_CASE_ ) - 1]
else:
UpperCamelCase__ = False
indirect_parents.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = s
UpperCamelCase__ = ss
            # check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return False
def UpperCAmelCase_ (self ):
return list(self.graph )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-2 , SCREAMING_SNAKE_CASE_=-1 ):
UpperCamelCase__ = time()
self.dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = time()
return end - begin
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=-2 ):
UpperCamelCase__ = time()
self.bfs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = time()
return end - begin
| 244 |
def capitalize_one_letter_variants(txt: str) -> list[str]:
    """Return every variant of ``txt`` with exactly one alphabetic character uppercased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
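# Example: for "ab" the function returns ["Ab", "aB"]; non-alphabetic positions
# are skipped, so "a1" returns only ["A1"].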
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 244 | 1 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Print a path through ``maze`` (0 = free, 1 = blocked) and return whether one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursively extend the path at cell (i, j), backtracking on dead ends."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
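# Example: for maze = [[0, 1], [0, 0]], solve_maze(maze) prints
# [1, 0]
# [1, 1]
# and returns True (the path goes down, then right).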
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : List[str] = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 46 | 0 |
def bfs(graph, s, t, parent):
    # Return True if the sink t is reachable from the source s in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS to store the augmenting path.
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow

        # Update residual capacities of the edges and their reverse edges.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
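# For this classic 6-node example the maximum flow from node 0 to node 5 is 23.
# Choosing augmenting paths with BFS makes this the Edmonds-Karp variant of
# Ford-Fulkerson, which runs in O(V * E^2).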
| 175 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 175 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
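# Running locally (sketch): slow tests are skipped unless the RUN_SLOW=1
# environment variable is set, e.g. `RUN_SLOW=1 pytest -k bleu`; each language
# pair downloads the corresponding facebook/wmt19-* checkpoint on first use.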
| 371 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size=None,
        num_proc=None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ):
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
| 198 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
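# Example output shape (for a repo containing sorts/bubble_sort.py):
#
# ## Sorts
#   * [Bubble Sort](sorts/bubble_sort.py)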
if __name__ == "__main__":
print_directory_md(""".""")
| 224 |
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if ``item`` is present in the sorted list ``a_list``."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
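# Example: binary_search([1, 3, 5, 7], 5) -> True, binary_search([1, 3, 5, 7], 4)
# -> False. The input must already be sorted; each recursive call halves the
# search range, giving O(log n) comparisons.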
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 224 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case : Any = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Any = ['DeiTFeatureExtractor']
_snake_case : List[Any] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[Any] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
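# Design note: _LazyModule defers the heavy torch/TF imports declared above until
# the corresponding attribute is first accessed, which keeps `import transformers`
# fast even when all optional backends are installed.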
| 179 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 179 | 1 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
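# Usage sketch (the checkpoint name is an example):
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# result = depth_estimator("photo.jpg")
# result["depth"] is a PIL image of the rescaled depth map;
# result["predicted_depth"] is the raw tensor predicted by the model.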
| 252 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
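    # Note on `strength`: with num_inference_steps=100 and strength=0.3, get_timesteps
    # keeps only the last 30 scheduler steps, so the output stays close to the input
    # image; strength=1.0 would denoise from (almost) pure noise.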
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 246 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        # Linear open addressing: probe the next bucket, wrapping around.
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        # Set the value if the bucket is empty or holds the same key;
        # return False on a collision with a different key.
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
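# Usage sketch:
# hm: HashMap[str, int] = HashMap()
# hm["a"] = 1
# hm["b"] = 2
# len(hm)    -> 2
# del hm["a"]
# "a" in hm  -> False (lookup falls through to the KeyError path in __getitem__)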
| 363 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class a_ :
lowercase = 42
lowercase = 42
class a_ ( lowerCamelCase ):
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 1
UpperCamelCase = [1, 2]
UpperCamelCase = {"""a""": 1, """b""": 2}
UpperCamelCase = {"""a""": [1, 2], """b""": [3, 4]}
UpperCamelCase = {"""a""": {"""1""": 1}, """b""": 2}
UpperCamelCase = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 2
UpperCamelCase = [2, 3]
UpperCamelCase = {"""a""": 2, """b""": 3}
UpperCamelCase = {"""a""": [2, 3], """b""": [4, 5]}
UpperCamelCase = {"""a""": {"""1""": 2}, """b""": 3}
UpperCamelCase = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase = 2
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
UpperCamelCase = {"""a""": 2, """b""": 0, """c""": 2}
UpperCamelCase = {
"""a""": np.eye(2 ).astype(_SCREAMING_SNAKE_CASE ),
"""b""": np.zeros(3 ).astype(_SCREAMING_SNAKE_CASE ),
"""c""": np.ones(2 ).astype(_SCREAMING_SNAKE_CASE ),
}
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_SCREAMING_SNAKE_CASE ): # can't pickle a local lambda
            map_nested(lambda x: x + 1, _SCREAMING_SNAKE_CASE, num_proc=_SCREAMING_SNAKE_CASE)
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = {"""a""": 1, """b""": 2}
UpperCamelCase = {"""a""": 3, """b""": 4}
UpperCamelCase = {"""a""": 5, """b""": 6}
UpperCamelCase = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[str]:
"""simple docstring"""
class a_ :
lowercase = """bar"""
UpperCamelCase = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(_SCREAMING_SNAKE_CASE , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
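# For reference, a context manager with the semantics exercised above can be
# sketched in a few lines (illustrative only, not the datasets implementation;
# the helper name is hypothetical):
#
#   from contextlib import contextmanager
#
#   @contextmanager
#   def _temp_assign(obj, attr, value):
#       original = getattr(obj, attr)
#       setattr(obj, attr, value)
#       try:
#           yield
#       finally:
#           setattr(obj, attr, original)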
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def lowercase__ ( iterable_length , num_proc , expected_num_proc )-> List[str]:
    with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
        """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
        data_struct = {F"{i}": i for i in range(iterable_length )}
        map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
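# The parametrization above encodes two dispatch rules: inputs shorter than
# parallel_min_length (16 here) always take the single-process path, and
# otherwise num_proc is capped at the number of items, which is why both
# (16, 17, ...) and (17, 16, ...) end up with exactly 16 processes.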
class a_ ( lowerCamelCase ):
@require_tf
def A__ ( self ) -> Any:
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
UpperCamelCase = layers.Dense(2 )
def gen_random_output():
UpperCamelCase = tf.random.uniform((1, 3) )
return model(_SCREAMING_SNAKE_CASE ).numpy()
with temp_seed(42 , set_tensorflow=_SCREAMING_SNAKE_CASE ):
UpperCamelCase = gen_random_output()
with temp_seed(42 , set_tensorflow=_SCREAMING_SNAKE_CASE ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def A__ ( self ) -> int:
"""simple docstring"""
import torch
def gen_random_output():
UpperCamelCase = torch.nn.Linear(3 , 2 )
UpperCamelCase = torch.rand(1 , 3 )
return model(_SCREAMING_SNAKE_CASE ).detach().numpy()
with temp_seed(42 , set_pytorch=_SCREAMING_SNAKE_CASE ):
UpperCamelCase = gen_random_output()
with temp_seed(42 , set_pytorch=_SCREAMING_SNAKE_CASE ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def A__ ( self ) -> Dict:
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
UpperCamelCase = gen_random_output()
with temp_seed(42 ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def lowercase__ ( input_data )-> List[str]:
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def lowercase__ ( data , expected_output )-> str:
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def lowercase__ ( )-> Union[str, Any]:
    input = A(x=1 , y="""foobar""" )
    expected_output = {"""x""": 1, """y""": """foobar"""}
    assert asdict(input ) == expected_output
    input = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
    expected_output = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="""foo""" )] )
def _split_text( text )-> List[Any]:
    return text.split()
def _aseconds_generator_of_aitems_with_timing( content )-> List[str]:
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def lowercase__ ( )-> int:
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("""a""" ) == 2
        assert out.count("""b""" ) == 2
        assert len(out ) == 4
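# The 0.1s slack in the timing assert above is the point of the test: the
# generator sleeps 2s between its two items, so if iflatmap_unordered buffered
# a worker's full output instead of streaming it, the first item would arrive
# roughly 2s after it was actually yielded and the assert would fail.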
| 183 | 0 |
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    """This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
    """library. You can have a look at this example script for pointers: """
    """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)


def simple_accuracy(preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, 'sklearn')
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, 'sklearn')
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, 'sklearn')
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, 'sklearn')
    assert len(preds) == len(labels), F'''Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'''
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, 'sklearn')
    if len(preds) != len(labels):
        raise ValueError(F'''Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}''')
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
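# Minimal usage sketch (hypothetical values; requires scikit-learn and scipy):
#
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}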
| 285 |
from PIL import Image


def change_contrast(img: Image , level: int ) -> Image:
    '''simple docstring'''
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int ) -> int:
        return int(128 + factor * (c - 128) )

    return img.point(contrast )


if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
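# Quick sanity check on the factor formula (plain Python, no image required):
# at level=0 it reduces to (259 * 255) / (255 * 259) == 1.0, i.e. the identity
# mapping, while level=170 gives a factor of roughly 4.85, stretching pixel
# values away from the 128 midpoint (PIL builds a 0-255 lookup table from
# contrast(), so out-of-range results are clipped).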
| 285 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class __lowerCAmelCase ( unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , "new_code.py" )
        with open(fname , "w" , newline="\n" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , "r" ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
        self.assertEqual(code , REFERENCE_CODE )
    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" , F"{long_class_name}SchedulerOutput" , re.sub("Bert" , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , REFERENCE_CODE , overwrite_result=re.sub("DDPM" , "Test" , REFERENCE_CODE ) , )
| 158 |
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
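# Worked check of the example above: f(y) = y**2 * y**4 = y**6, so
# f''(y) = 30 * y**4 and f''(9) = 30 * 9**4 = 196830. differentiate(f, 9, 2)
# recovers exactly this by taking the second dual coefficient (which holds
# f''(9) / 2!) and multiplying it back by factorial(2).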
| 158 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase__: Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self : List[Any] , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 255 , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : bool = True , **__snake_case : List[str] , ) -> None:
super().__init__(**__snake_case )
UpperCAmelCase : Any = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase : int = get_size_dict(__snake_case , default_to_square=__snake_case )
UpperCAmelCase : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase : List[str] = get_size_dict(__snake_case , default_to_square=__snake_case , param_name='''crop_size''' )
UpperCAmelCase : Any = do_resize
UpperCAmelCase : List[Any] = size
UpperCAmelCase : str = resample
UpperCAmelCase : Tuple = do_center_crop
UpperCAmelCase : Any = crop_size
UpperCAmelCase : str = do_rescale
UpperCAmelCase : int = rescale_factor
UpperCAmelCase : Union[str, Any] = do_normalize
UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase : List[Any] = do_convert_rgb
def A ( self : Union[str, Any] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[Any] , ) -> np.ndarray:
UpperCAmelCase : List[str] = get_size_dict(__snake_case , default_to_square=__snake_case )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(__snake_case , size=size['''shortest_edge'''] , default_to_square=__snake_case )
return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
def A ( self : Tuple , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Union[str, Any] , ) -> np.ndarray:
UpperCAmelCase : Tuple = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__snake_case , size=(size['''height'''], size['''width''']) , data_format=__snake_case , **__snake_case )
def A ( self : Any , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[Any] , ) -> List[str]:
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def A ( self : List[Any] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Dict , ) -> np.ndarray:
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def A ( self : List[str] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : int = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : bool = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[ChannelDimension] = ChannelDimension.FIRST , **__snake_case : Optional[int] , ) -> PIL.Image.Image:
UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : str = size if size is not None else self.size
UpperCAmelCase : Dict = get_size_dict(__snake_case , param_name='''size''' , default_to_square=__snake_case )
UpperCAmelCase : List[str] = resample if resample is not None else self.resample
UpperCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase : Any = get_size_dict(__snake_case , param_name='''crop_size''' , default_to_square=__snake_case )
UpperCAmelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase : List[str] = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase : str = [convert_to_rgb(__snake_case ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase : Union[str, Any] = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
UpperCAmelCase : int = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_center_crop:
UpperCAmelCase : Optional[Any] = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images]
if do_rescale:
UpperCAmelCase : Optional[int] = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
UpperCAmelCase : int = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
UpperCAmelCase : List[Any] = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
UpperCAmelCase : Tuple = {'''pixel_values''': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
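# Processing order implemented by the preprocess method above: optional RGB
# conversion, resize to the shortest edge, center crop, rescale (1/255 by
# default), normalization with the OpenAI CLIP mean/std, then conversion to
# channels-first arrays wrapped in a BatchFeature.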
| 23 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : Any ) -> str:
UpperCAmelCase : Any = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
UpperCAmelCase : int = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__snake_case ) , __snake_case )
def A ( self : int ) -> str:
UpperCAmelCase : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__snake_case ) , x.transpose() ) )
UpperCAmelCase : str = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A ( self : str ) -> Union[str, Any]:
UpperCAmelCase : Any = np.random.randn(3 , 4 )
UpperCAmelCase : List[Any] = torch.tensor(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ) , transpose(__snake_case ).numpy() ) )
UpperCAmelCase : Tuple = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Any = torch.tensor(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , transpose(__snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase : int = np.random.randn(3 , 4 )
UpperCAmelCase : Optional[int] = tf.constant(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ) , transpose(__snake_case ).numpy() ) )
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : str = tf.constant(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , transpose(__snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A ( self : Tuple ) -> Any:
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = jnp.array(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ) , np.asarray(transpose(__snake_case ) ) ) )
UpperCAmelCase : Dict = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : int = jnp.array(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(__snake_case , axes=(1, 2, 0) ) ) ) )
def A ( self : Optional[Any] ) -> Any:
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , np.reshape(__snake_case , (4, 3) ) ) )
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , np.reshape(__snake_case , (12, 5) ) ) )
@require_torch
def A ( self : Union[str, Any] ) -> int:
UpperCAmelCase : Dict = np.random.randn(3 , 4 )
UpperCAmelCase : Optional[Any] = torch.tensor(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , reshape(__snake_case , (4, 3) ).numpy() ) )
UpperCAmelCase : List[str] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : List[Any] = torch.tensor(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , reshape(__snake_case , (12, 5) ).numpy() ) )
@require_tf
def A ( self : int ) -> List[str]:
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 )
UpperCAmelCase : List[str] = tf.constant(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , reshape(__snake_case , (4, 3) ).numpy() ) )
UpperCAmelCase : List[Any] = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Optional[Any] = tf.constant(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , reshape(__snake_case , (12, 5) ).numpy() ) )
@require_flax
def A ( self : Any ) -> Dict:
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
UpperCAmelCase : Union[str, Any] = jnp.array(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , np.asarray(reshape(__snake_case , (4, 3) ) ) ) )
UpperCAmelCase : Any = np.random.randn(3 , 4 , 5 )
UpperCAmelCase : Optional[Any] = jnp.array(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , np.asarray(reshape(__snake_case , (12, 5) ) ) ) )
def A ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : Union[str, Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__snake_case ) , np.squeeze(__snake_case ) ) )
UpperCAmelCase : str = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , np.squeeze(__snake_case , axis=2 ) ) )
@require_torch
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : List[str] = torch.tensor(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ) , squeeze(__snake_case ).numpy() ) )
UpperCAmelCase : Any = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : str = torch.tensor(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , squeeze(__snake_case , axis=2 ).numpy() ) )
@require_tf
def A ( self : Optional[Any] ) -> Dict:
UpperCAmelCase : int = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : Optional[int] = tf.constant(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ) , squeeze(__snake_case ).numpy() ) )
UpperCAmelCase : List[str] = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : Optional[int] = tf.constant(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , squeeze(__snake_case , axis=2 ).numpy() ) )
@require_flax
def A ( self : List[Any] ) -> Dict:
UpperCAmelCase : Optional[int] = np.random.randn(1 , 3 , 4 )
UpperCAmelCase : int = jnp.array(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ) , np.asarray(squeeze(__snake_case ) ) ) )
UpperCAmelCase : str = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase : int = jnp.array(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , np.asarray(squeeze(__snake_case , axis=2 ) ) ) )
def A ( self : Optional[Any] ) -> int:
UpperCAmelCase : Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , np.expand_dims(__snake_case , axis=1 ) ) )
@require_torch
def A ( self : List[str] ) -> Tuple:
UpperCAmelCase : Tuple = np.random.randn(3 , 4 )
UpperCAmelCase : Tuple = torch.tensor(__snake_case )
self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , expand_dims(__snake_case , axis=1 ).numpy() ) )
@require_tf
def A ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = np.random.randn(3 , 4 )
UpperCAmelCase : Any = tf.constant(__snake_case )
self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , expand_dims(__snake_case , axis=1 ).numpy() ) )
@require_flax
def A ( self : Any ) -> List[Any]:
UpperCAmelCase : List[str] = np.random.randn(3 , 4 )
UpperCAmelCase : str = jnp.array(__snake_case )
self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , np.asarray(expand_dims(__snake_case , axis=1 ) ) ) )
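# Every framework-specific case above checks the same contract: transpose,
# reshape, squeeze and expand_dims must behave identically whether they receive
# an np.ndarray, torch.Tensor, tf.Tensor or jnp.ndarray, with the plain NumPy
# result serving as the reference in each comparison.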
| 23 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = CLIPTokenizer
UpperCamelCase = CLIPTokenizerFast
UpperCamelCase = True
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
_UpperCAmelCase = {'unk_token': '<unk>'}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(A))
def _lowerCamelCase ( self : Optional[Any] , **A : str) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : Any , **A : Dict) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : Optional[int] , A : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = 'lower newer'
return input_text, output_text
def _lowerCamelCase ( self : Dict) -> Any:
"""simple docstring"""
_UpperCAmelCase = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
_UpperCAmelCase = tokenizer.tokenize(A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A) , A)
@require_ftfy
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = self.tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_UpperCAmelCase = 'xa\u0303y' + ' ' + 'x\xe3y'
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
# Test that the tokenization is identical on unicode of space type
_UpperCAmelCase = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
                '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
# Test that the tokenization is identical on unicode of line break type
_UpperCAmelCase = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
def _lowerCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_UpperCAmelCase = F"{text_of_1_token} {text_of_1_token}"
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , )
_UpperCAmelCase = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A)
self.assertEqual(encoding.offset_mapping[0] , (0, len(A)))
self.assertEqual(
encoding.offset_mapping[1] , (len(A) + 1, len(A) + 1 + len(A)) , )
_UpperCAmelCase = F" {text}"
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , )
_UpperCAmelCase = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A) + 1, 1 + len(A) + 1 + len(A)) , )
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
with self.assertRaises(A) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.'))
@require_ftfy
def _lowerCamelCase ( self : int) -> int:
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
        pass
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
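# The standard transformers lazy-import layout: at runtime the module object is
# replaced by a _LazyModule that resolves the names listed in _import_structure
# on first attribute access, so importing the package does not pull in torch or
# TensorFlow; the TYPE_CHECKING branch exists purely for static analyzers.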
| 290 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase__ =logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["pixel_values"]
def __init__(self : Optional[int] , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : bool = True , snake_case_ : Union[int, float] = 1 / 2_5_5 , snake_case_ : bool = True , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : bool = True , **snake_case_ : Dict , ):
super().__init__(**snake_case_ )
__a : List[Any] = size if size is not None else {'''shortest_edge''': 2_2_4}
__a : List[Any] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
__a : Any = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__a : Dict = get_size_dict(snake_case_ , default_to_square=snake_case_ , param_name='''crop_size''' )
__a : Optional[int] = do_resize
__a : Optional[int] = size
__a : Optional[int] = resample
__a : Any = do_center_crop
__a : Optional[Any] = crop_size
__a : Optional[Any] = do_rescale
__a : Tuple = rescale_factor
__a : Optional[Any] = do_normalize
__a : List[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__a : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD
__a : List[str] = do_convert_rgb
def lowerCAmelCase (self : Any , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Dict , ):
__a : Optional[Any] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__a : Any = get_resize_output_image_size(snake_case_ , size=size['''shortest_edge'''] , default_to_square=snake_case_ )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase (self : Dict , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Dict , ):
__a : Union[str, Any] = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(snake_case_ , size=(size['''height'''], size['''width''']) , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase (self : List[Any] , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : List[Any] , ):
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase (self : Tuple , snake_case_ : np.ndarray , snake_case_ : Union[float, List[float]] , snake_case_ : Union[float, List[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Union[str, Any] , ):
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase (self : int , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : int = None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : bool = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **snake_case_ : Optional[Any] , ):
__a : Optional[Any] = do_resize if do_resize is not None else self.do_resize
__a : str = size if size is not None else self.size
__a : str = get_size_dict(snake_case_ , param_name='''size''' , default_to_square=snake_case_ )
__a : Tuple = resample if resample is not None else self.resample
__a : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__a : Dict = crop_size if crop_size is not None else self.crop_size
__a : List[str] = get_size_dict(snake_case_ , param_name='''crop_size''' , default_to_square=snake_case_ )
__a : Dict = do_rescale if do_rescale is not None else self.do_rescale
__a : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__a : str = do_normalize if do_normalize is not None else self.do_normalize
__a : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__a : Any = image_std if image_std is not None else self.image_std
__a : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__a : List[Any] = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__a : int = [convert_to_rgb(snake_case_ ) for image in images]
# All transformations expect numpy arrays.
__a : Dict = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
__a : Dict = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
__a : Tuple = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
__a : List[str] = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
__a : List[Any] = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
__a : Any = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
__a : Tuple = {'''pixel_values''': images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
| 216 |
def naive_pattern_search( s : str , pattern : str ):
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
    assert naive_pattern_search('ABCDEFG', 'DE') == [3]
    print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
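# This is the brute-force O(len(s) * len(pattern)) scan: every start index is
# tried and each trial bails out at the first mismatch. For the inputs above,
# "DE" is found once at index 3 and the printed call returns [4, 10, 18].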
| 216 | 1 |
"""simple docstring"""
def solution( n : int = 1_000 ):
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
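# Note on the summand: 2 * a * ((a - 1) // 2) equals a * (a - 1) when a is odd
# and a * (a - 2) when a is even, so the sum also admits a simple closed form
# if the generator expression ever becomes a bottleneck.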
| 157 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a :
"""simple docstring"""
def __init__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]=3 , lowercase_ : Tuple=32 , lowercase_ : List[str]=3 , lowercase_ : str=10 , lowercase_ : Tuple=[10, 20, 30, 40] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]="relu" , lowercase_ : Optional[int]=3 , lowercase_ : List[Any]=None , ):
UpperCamelCase__ : Tuple =parent
UpperCamelCase__ : str =batch_size
UpperCamelCase__ : List[Any] =image_size
UpperCamelCase__ : Union[str, Any] =num_channels
UpperCamelCase__ : Dict =embeddings_size
UpperCamelCase__ : Tuple =hidden_sizes
UpperCamelCase__ : List[Any] =depths
UpperCamelCase__ : List[Any] =is_training
UpperCamelCase__ : Union[str, Any] =use_labels
UpperCamelCase__ : Optional[int] =hidden_act
UpperCamelCase__ : Dict =num_labels
UpperCamelCase__ : List[str] =scope
UpperCamelCase__ : Optional[Any] =len(lowercase_ )
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : Optional[int] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : Tuple =None
if self.use_labels:
UpperCamelCase__ : str =ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ : List[str] =self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Tuple ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _lowerCAmelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[str] ):
UpperCamelCase__ : Any =RegNetModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Union[str, Any] =model(lowercase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple ):
UpperCamelCase__ : List[str] =self.num_labels
UpperCamelCase__ : Optional[Any] =RegNetForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Optional[int] =model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : List[Any] =self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Optional[Any] =config_and_inputs
UpperCamelCase__ : str ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __a ( snake_case__, snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (
{'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : Optional[int] =RegNetModelTester(self )
UpperCamelCase__ : Optional[int] =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def _lowerCAmelCase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self : Dict ):
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _lowerCAmelCase ( self : Dict ):
pass
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ , UpperCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] =model_class(lowercase_ )
UpperCamelCase__ : List[str] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : List[str] =[*signature.parameters.keys()]
UpperCamelCase__ : str =['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ , UpperCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] =model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 157 | 1 |
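The forward-signature check in the RegNet tests above leans on `inspect.signature` preserving declaration order. A minimal standalone sketch of that technique (the `TinyModel` class is a hypothetical stand-in, not part of any test suite):

import inspect


class TinyModel:
    def forward(self, pixel_values, labels=None):
        # stand-in forward; a real model would return logits/hidden states
        return pixel_values


# signature.parameters is an OrderedDict, so argument order is deterministic
signature = inspect.signature(TinyModel().forward)
arg_names = [*signature.parameters.keys()]
assert arg_names[:1] == ["pixel_values"]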
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
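# For orientation, check_same_shape can be exercised directly; a quick sanity
# check (a sketch, assuming torch is available as the imports above require):
#
#     assert check_same_shape([torch.zeros(2, 3), torch.ones(2, 3)])      # matching shapes
#     assert not check_same_shape([torch.zeros(2, 3), torch.ones(2, 4)])  # shape mismatch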
class a__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8, mid_block_type=None, only_cross_attention=True, out_channels=5,
            resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="quick_gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_latent_upscaler(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # sigma-free schedulers are not supported here
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
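# The loop above walks the KarrasDiffusionSchedulers enum and skips entries by
# name; the same enum-filtering idiom in isolation (a toy sketch):
#
#     import diffusers
#     from diffusers.schedulers import KarrasDiffusionSchedulers
#
#     kept = [e.name for e in KarrasDiffusionSchedulers if e.name not in {"HeunDiscreteScheduler"}]
#     scheduler_classes = [getattr(diffusers, name) for name in kept]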
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20,
            guidance_scale=0, generator=generator, output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy")
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png")
        upscaled_image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20,
            guidance_scale=0, generator=generator, output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy")
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
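# Both slow tests seed torch.manual_seed(33) so repeated runs draw identical
# latents; the underlying guarantee, in miniature (a sketch, assuming torch):
#
#     import torch
#
#     g1 = torch.Generator().manual_seed(33)
#     g2 = torch.Generator().manual_seed(33)
#     assert torch.equal(torch.randn(2, generator=g1), torch.randn(2, generator=g2))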
| 18 | from math import factorial, radians
def _snake_case(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10):
    """simple docstring"""
    # Wrap the angle into [0, 360) so the series converges quickly
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 18 | 1 |
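A quick check of the Maclaurin sine routine above against math.sin (a sketch; `_snake_case` is the reconstructed function from the previous block, kept under its dataset name):

from math import radians, sin

assert abs(_snake_case(30) - 0.5) < 1e-9               # sin(30°) = 0.5
assert abs(_snake_case(90) - sin(radians(90))) < 1e-9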
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
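The MMBT module above defers heavy imports through `_LazyModule`. A toy illustration of the lazy-import idea (a simplified sketch, not the transformers implementation):

import importlib


class ToyLazyModule:
    """Resolve attributes from submodules on first access, not at import time."""

    def __init__(self, package, import_structure):
        self._package = package
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(f"{self._package}.{self._attr_to_module[attr]}")
        return getattr(module, attr)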
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    '''simple docstring'''
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
__magic_name__ = "patrickvonplaten/t5-tiny-random"
__magic_name__ = "sshleifer/bart-tiny-random"
__magic_name__ = "sshleifer/tiny-mbart"
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase(TestCasePlus):
    '''simple docstring'''

    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / '''utest_input.source'''
        output_file_name = input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        articles = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / '''scores.json''')
        task = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        testargs = f"""
            run_eval.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()
        with patch.object(sys, '''argv''', testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))
    def test_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_eval_slow(self, model):
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / '''utest_input.source'''
        output_file_name = input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        text = {
            '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
            '''de''': [
                '''Maschinelles Lernen ist großartig, oder?''',
                '''Ich esse gerne Bananen''',
                '''Morgen ist wieder ein toller Tag!''',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / '''scores.json''')
        reference_path = str(tmp_dir / '''val.target''')
        _dump_articles(input_file_name, text['''en'''])
        _dump_articles(reference_path, text['''de'''])
        task = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''])
        with patch.object(sys, '''argv''', testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [''' num_beams | length_penalty''', model, '''Best score args''']
        un_expected_strings = ['''Info''']
        if "translation" in task:
            expected_strings.append('''bleu''')
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
| 152 | 0 |
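The tests above drive CLI scripts by temporarily swapping `sys.argv` via `unittest.mock.patch`. The core trick in isolation (the `fake_cli` function is hypothetical):

import sys
from unittest.mock import patch


def fake_cli():
    # a real script would hand sys.argv[1:] to argparse
    return sys.argv[1:]


with patch.object(sys, "argv", ["prog", "--task", "summarization"]):
    assert fake_cli() == ["--task", "summarization"]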
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
def UpperCamelCase_ ( *lowerCAmelCase__ : Any , **lowerCAmelCase__ : str ) -> int:
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['torch'] )
def UpperCamelCase_ ( *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['torch'] )
def UpperCamelCase_ ( *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Tuple ) -> str:
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['torch'] )
def UpperCamelCase_ ( *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['torch'] )
def UpperCamelCase_ ( *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : Any ) -> Tuple:
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['torch'] )
def UpperCamelCase_ ( *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['torch'] )
def UpperCamelCase_ ( *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : int ) -> Any:
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : str , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : str , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : str , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : str , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : int , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : str , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(cls , ['torch'] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""torch"""]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ):
requires_backends(cls , ['torch'] )
| 224 |
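All of the classes above follow one pattern: a `DummyObject` metaclass plus `requires_backends`, so that touching any torch-backed API without torch installed fails with a clear error instead of an opaque ImportError. A self-contained toy version of the pattern (the names here are illustrative, not the transformers internals):

class MissingBackendError(ImportError):
    pass


def toy_requires_backends(obj, backends):
    name = getattr(obj, "__name__", obj.__class__.__name__)
    raise MissingBackendError(f"{name} requires the {backends} backend(s)")


class ToyDummyObject(type):
    # metaclass hook: even classmethod-style access like Cls.from_pretrained fails loudly
    def __getattr__(cls, key):
        toy_requires_backends(cls, cls._backends)


class ToyPipeline(metaclass=ToyDummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        toy_requires_backends(self, self._backends)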
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
_UpperCamelCase = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 208 | 0 |
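A quick correctness check for `line_length`: for a straight line the piecewise-linear approximation is exact, so y = x from 0 to 1 must measure √2 (a sketch reusing the function above):

import math

assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-12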
def _A(string: str, separator: str = " "):
    """simple docstring"""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 354 |
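Usage of the splitter above (kept under its dataset name `_A`): for single-character separators it mirrors str.split, except that a trailing separator yields no empty final field:

assert _A("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
assert _A("Hello there") == ["Hello", "there"]
assert _A("a#", "#") == ["a"]  # str.split would give ["a", ""]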
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid: list[list[int]], init: list[int], goal: list[int], cost: int, heuristic: list[list[int]]):
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 221 | 0 |
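The driver above builds a Manhattan-distance heuristic with a 99 penalty on obstacles. The heuristic construction in isolation, on a tiny 2x3 grid:

goal = (1, 2)
heuristic = [[abs(i - goal[0]) + abs(j - goal[1]) for j in range(3)] for i in range(2)]
assert heuristic == [[3, 2, 1], [2, 1, 0]]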
from ..utils import DummyObject, requires_backends
class snake_case__ (metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase :int = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *__lowercase , **__lowercase ) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class snake_case__ (metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase :Optional[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *__lowercase , **__lowercase ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class snake_case__ (metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase :Optional[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *__lowercase , **__lowercase ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class snake_case__ (metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase :str = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *__lowercase , **__lowercase ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class snake_case__ (metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase :Union[str, Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *__lowercase , **__lowercase ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class snake_case__ (metaclass=_a ):
"""simple docstring"""
__lowerCAmelCase :Optional[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *__lowercase , **__lowercase ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE__( cls , *__lowercase , **__lowercase ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 170 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def __UpperCamelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Any ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE:Optional[Any] = "adapt react readapt apt"
SCREAMING_SNAKE_CASE:Tuple = "adapt react readapt apt"
return input_text, output_text
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:List[str] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE:Any = "adapt react readapt apt"
SCREAMING_SNAKE_CASE:Any = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
SCREAMING_SNAKE_CASE:Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE:Optional[int] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ )
| 139 | 0 |
'''simple docstring'''
def split(string: str, separator: str = " ") -> list:
    """
    Split ``string`` on every occurrence of ``separator``.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
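# Added edge-case note: a trailing separator is silently dropped by the loop
# above, unlike str.split, which yields a trailing empty string.
assert split("a,b,", separator=",") == ["a", "b"]
assert "a,b,".split(",") == ["a", "b", ""]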
| 220 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
A__ : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
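# Usage sketch (added): with the lazy module above, importing this package does
# not import the tokenizer code; the real submodule is only loaded on first
# attribute access, e.g.:
#
#   from transformers.models.herbert import HerbertTokenizer   # triggers import here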
| 220 | 1 |
def bead_sort(sequence: list) -> list:
    """Bead sort ("gravity sort"); only valid for non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 26 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """Acquiring an already-held lock must raise Timeout after the deadline."""
    locka = FileLock(str(tmpdir / "foo.lock"))
    lockb = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """Lock file names longer than 255 characters are truncated to stay valid."""
    filename = "a" * 1000 + ".lock"
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(".lock")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
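# Minimal usage sketch (added; file paths are illustrative placeholders):
# FileLock serves as an inter-process mutex around a shared file.
#
#   with FileLock("/tmp/shared.txt.lock"):
#       with open("/tmp/shared.txt", "a") as f:
#           f.write("exclusive write\n")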
| 254 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Optional[Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Dict =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Union[str, Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Optional[Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Tuple =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Optional[Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Dict =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Union[str, Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Optional[Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Union[str, Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
| 354 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 150_0000) -> int:
    """
    Count the perimeters up to `limit` that can be made by exactly one
    integer-sided right triangle. Primitive triples are generated with
    Euclid's formula; a primitive triple's perimeter is 2 * m * (m + n).
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the windowed divisibility property from Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-to-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 324 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Deformable DETR model."""

    model_type = 'deformable_detr'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
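# Usage sketch (added; assumes `transformers` is installed): build a two-stage
# variant and round-trip it through `to_dict`; `num_attention_heads` resolves
# through the attribute map above.
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.num_attention_heads == 8
#   assert config.to_dict()["model_type"] == "deformable_detr"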
| 324 | 1 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a plain list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
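# Usage sketch (added): FIFO behaviour with wrap-around at full capacity.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)
    assert queue.dequeue() == 1
    queue.enqueue(4)  # reuses the slot freed at the front
    assert [queue.dequeue() for _ in range(len(queue))] == [2, 3, 4]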
| 357 | '''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if the two integers have opposite signs; in two's complement,
    the XOR of two numbers is negative exactly when their sign bits differ.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
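# Manual walk-through (added, illustrative): how a constrained-generation
# driver would feed candidate tokens into the constraint and read its state.
if __name__ == "__main__":
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
        print(token, dc.current_seq, dc.completed)
    # expected: 1 [1] False / 2 [1, 2] False / 4 [1, 2, 4] True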
| 106 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """
    Bezier curve over a list of 2D control points; the degree of the curve is
    one less than the number of control points.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Bernstein basis polynomials of the curve's degree, evaluated at t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """The point on the curve at parameter t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve and its control points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree)
        )
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
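# Added analytic check (runs on import; needs scipy): for the quadratic curve
# with control points (0, 0), (5, 5), (5, 0), the midpoint is
# B(0.5) = 0.25*P0 + 0.5*P1 + 0.25*P2 = (3.75, 2.5).
assert BezierCurve([(0, 0), (5, 5), (5, 0)]).bezier_curve_function(0.5) == (3.75, 2.5)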
| 102 | 0 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
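# Quick property check (added, illustrative): the LM head produced by
# `make_linear_from_emb` is weight-tied to the embedding matrix.
#
#   emb = nn.Embedding(10, 4)
#   head = make_linear_from_emb(emb)
#   assert head.weight.data_ptr() == emb.weight.data_ptr() and head.bias is None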
| 354 | '''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot classification: parses the candidate
    labels and builds one (sequence, hypothesis) pair per label.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline: each candidate label is
    scored by posing it as an entailment hypothesis for the input sequence.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
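# Usage sketch (added): the standard entry point for this pipeline.
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("one day I will see the world", candidate_labels=["travel", "cooking"])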
| 106 | 0 |
"""simple docstring"""
from math import ceil
def solution(n: int = 10_01) -> int:
    """Sum the numbers on the diagonals of an n x n number spiral; each ring's
    four corners sum to 4 * odd**2 - 6 * even."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
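# Added sanity check: the diagonals of a 5x5 spiral are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
assert solution(5) == 101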
| 106 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """Create the HF modules cache and put it on the Python path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create a (possibly nested) package under the HF modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file: str):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """Recursively collect the files reachable through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Verify the top-level imports of a module are installed; return its relative imports."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Return the single DiffusionPipeline subclass defined outside of diffusers."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Download (or locate) a dynamic module file and cache it inside the HF modules cache."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Resolve a class from a dynamic module, downloading and caching it if necessary."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 271 | 0 |
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style uppercase column title (A, B, ..., Z, AA, ...) to
    its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
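# Added intuition: column titles are base-26 numerals whose digits A..Z map to
# 1..26 (there is no zero digit), so "AA" follows "Z".
assert excel_title_to_column("Z") == 26
assert excel_title_to_column("AA") == 27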
| 365 |
import math
def perfect_square(num: int) -> bool:
    """
    Check if a number is a perfect square using floating point math.

    >>> perfect_square(9)
    True
    >>> perfect_square(10)
    False
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is a perfect square using binary search.

    >>> perfect_square_binary_search(9)
    True
    >>> perfect_square_binary_search(10)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
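# Added note: the binary-search variant works with exact integer arithmetic,
# so it stays correct for huge inputs where float sqrt can be affected by rounding.
assert perfect_square_binary_search((10**8 + 1) ** 2)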
| 22 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """CLIP-style image processor: resize, center-crop, rescale and normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
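# Usage sketch (added, illustrative):
#
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)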
| 248 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__snake_case : Optional[int] = """sshleifer/mar_enro_6_3_student"""
class A__(a_ ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Tuple:
super().setUp()
a_ : Union[str, Any] = cached_path(
"""https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=_lowercase , )
a_ : Union[str, Any] = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
MarianMTModel.from_pretrained(_lowercase )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> int:
a_ : Any = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
a_ : List[str] = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
a_ : Dict = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
a_ : Optional[int] = bash_script.replace(_lowercase , str(_lowercase ) )
a_ : int = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
a_ : Dict = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
a_ : Union[str, Any] = ["""finetune.py"""] + bash_script.split() + args
with patch.object(_lowercase , """argv""" , _lowercase ):
a_ : Optional[Any] = argparse.ArgumentParser()
a_ : Tuple = pl.Trainer.add_argparse_args(_lowercase )
a_ : Any = SummarizationModule.add_model_specific_args(_lowercase , os.getcwd() )
a_ : str = parser.parse_args()
a_ : Union[str, Any] = main(_lowercase )
# Check metrics
a_ : Any = load_json(model.metrics_save_path )
a_ : List[Any] = metrics["""val"""][0]
a_ : Union[str, Any] = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _lowercase )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.0_1 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
a_ : Optional[Any] = os.listdir(_lowercase )
a_ : Dict = [x for x in contents if x.endswith(""".ckpt""" )][0]
a_ : str = os.path.join(args.output_dir , _lowercase )
a_ : Any = torch.load(_lowercase , map_location="""cpu""" )
a_ : Union[str, Any] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
a_ : List[Any] = {os.path.basename(_lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class A__(a_ ):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Tuple = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
a_ : str = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
a_ : Union[str, Any] = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
a_ : Union[str, Any] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
a_ : Any = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
a_ : Dict = bash_script.replace(_lowercase , str(_lowercase ) )
a_ : int = self.get_auto_remove_tmp_dir()
a_ : Optional[Any] = bash_script.replace("""--fp16""" , """""" )
a_ : List[str] = 6
a_ : str = (
["""distillation.py"""]
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
"""--gpus=1""",
"""--learning_rate=1e-3""",
F'''--num_train_epochs={epochs}''',
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(_lowercase , """argv""" , _lowercase ):
a_ : int = argparse.ArgumentParser()
a_ : Any = pl.Trainer.add_argparse_args(_lowercase )
a_ : str = SummarizationDistiller.add_model_specific_args(_lowercase , os.getcwd() )
a_ : Any = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
a_ : Dict = distill_main(_lowercase )
# Check metrics
a_ : Any = load_json(model.metrics_save_path )
a_ : int = metrics["""val"""][0]
a_ : Union[str, Any] = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _lowercase )
# check lightning ckpt can be loaded and has a reasonable statedict
a_ : Dict = os.listdir(_lowercase )
a_ : List[Any] = [x for x in contents if x.endswith(""".ckpt""" )][0]
a_ : int = os.path.join(args.output_dir , _lowercase )
a_ : Union[str, Any] = torch.load(_lowercase , map_location="""cpu""" )
a_ : List[str] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
a_ : List[str] = {os.path.basename(_lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 248 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self : Any , parent : str , batch_size : Optional[int]=7 , num_channels : int=3 , min_resolution : Tuple=30 , max_resolution : Optional[Any]=4_00 , do_resize_and_center_crop : str=True , size : Union[str, Any]=None , crop_pct : Union[str, Any]=0.9 , crop_size : Any=None , do_normalize : int=True , image_mean : Union[str, Any]=[0.5, 0.5, 0.5] , image_std : Dict=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''shortest_edge''': 30}
        crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self : Union[str, Any] ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
    def image_processor_dict( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Dict ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''crop_pct''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
def lowerCamelCase_ ( self : int ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
def lowerCamelCase_ ( self : List[str] ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase_ ( self : Union[str, Any] ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase_ ( self : int ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
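# A rough sketch (an assumption about the intent, not code from
# PoolFormerImageProcessor itself) of the resize-then-center-crop arithmetic that
# `crop_pct` implies: the shortest edge is first scaled to crop_size / crop_pct,
# then a centered crop_size x crop_size window is taken.
def resize_then_center_crop_shape(height, width, crop_size=30, crop_pct=0.9):
    scale_size = int(crop_size / crop_pct)  # shortest edge after resize
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(long * scale_size / short)
    resized = (scale_size, new_long) if height <= width else (new_long, scale_size)
    return resized, (crop_size, crop_size)

print(resize_then_center_crop_shape(400, 640))  # ((33, 52), (30, 30))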
| 360 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class DataaVecTextConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = 'data2vec-text'
    def __init__( self : Optional[Any] , vocab_size : Optional[int]=3_05_22 , hidden_size : List[str]=7_68 , num_hidden_layers : Tuple=12 , num_attention_heads : int=12 , intermediate_size : Union[str, Any]=30_72 , hidden_act : List[Any]="gelu" , hidden_dropout_prob : Any=0.1 , attention_probs_dropout_prob : Union[str, Any]=0.1 , max_position_embeddings : Tuple=5_12 , type_vocab_size : str=2 , initializer_range : str=0.02 , layer_norm_eps : List[Any]=1E-12 , pad_token_id : Any=1 , bos_token_id : List[Any]=0 , eos_token_id : Dict=2 , position_embedding_type : Any="absolute" , use_cache : Union[str, Any]=True , classifier_dropout : Any=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class DataaVecTextOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
def lowerCamelCase_ ( self : str ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
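# Short illustration of why the ONNX config above keys its dynamic axes by task:
# exporters mark batch/sequence (and, for multiple choice, the choice dimension)
# as variable-sized for every input tensor.
def dynamic_axes_for(task ):
    if task == "multiple-choice":
        axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis)] )

print(dynamic_axes_for("multiple-choice" ))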
| 177 | 0 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS ="""src/transformers"""
# Pattern that looks at the indentation in a line.
_re_indent =re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key =re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key =re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line =re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content =re.compile(r"""\[([^\]]+)\]""")
def get_indent( line ) -> str:
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks( code , indent_level="" , start_prompt=None , end_prompt=None ) -> Any:
    index = 0
    lines = code.split('\n' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['\n'.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
                current_block.append(lines[index] )
                blocks.append('\n'.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('\n'.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('\n'.join(lines[index:] ) )
    return blocks
def ignore_underscore( key ) -> Optional[int]:
    def _inner(x ):
        return key(x ).lower().replace('_' , '' )
    return _inner
def sort_objects( objects , key=None ) -> Union[str, Any]:
    # If no key is provided, we use a noop.
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import( import_statement ) -> str:
    # This inner function sort imports between [ ].
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F"[{imports}]"
        keys = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('\n' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports( file , check_only=True ) -> Dict:
    with open(file , encoding='utf-8' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F"Overwriting {file}." )
            with open(file , 'w' , encoding='utf-8' ) as f:
                f.write('\n'.join(main_blocks ) )
def sort_imports_in_all_inits( check_only=True ) -> Optional[Any]:
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '__init__.py' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '__init__.py' )]
    if len(failures ) > 0:
        raise ValueError(F"Would overwrite {len(failures )} files, run `make style`." )
if __name__ == "__main__":
a =argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
a =parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
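    # Worked example of the three-bucket ordering implemented by sort_objects:
    # ALL_CAPS constants first, CapWords classes second (sorted case-insensitively
    # with underscores ignored), lower_case functions last.
    print(sort_objects(["load_tf_weights", "BertModel", "BERT_CONSTANTS", "BertConfig", "gelu"]))
    # ['BERT_CONSTANTS', 'BertConfig', 'BertModel', 'gelu', 'load_tf_weights']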
| 73 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self : str, parent : List[Any], vocab_size : int=1_0_0, batch_size : Any=1_3, image_size : List[Any]=3_0, patch_size : Dict=2, num_channels : Any=3, is_training : Optional[Any]=True, use_labels : List[str]=True, hidden_size : Optional[Any]=3_2, num_hidden_layers : Any=5, num_attention_heads : Any=4, intermediate_size : Any=3_7, hidden_act : Optional[int]="gelu", hidden_dropout_prob : Dict=0.1, attention_probs_dropout_prob : Optional[int]=0.1, type_sequence_label_size : Dict=1_0, initializer_range : Tuple=0.02, num_labels : List[Any]=3, ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self : int ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values, labels
    def create_and_check_model( self : Any, config : List[Any], pixel_values : List[str], labels : List[str] ):
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self : Optional[Any], config : Optional[Any], pixel_values : int, labels : List[Any] ):
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self : Dict, config : List[Any], pixel_values : Any, labels : Union[str, Any] ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest( FlaxModelTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp( self : List[Any] ):
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(UpperCAmelCase__ )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1], UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ):
return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape, output.shape )
def _lowercase ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def _lowercase ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(UpperCAmelCase__ )
def _A ( ) -> str:
'''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self : Optional[int] ):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values
# prepare bool_masked_pos
__lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ )
# forward pass
__lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) )
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_0_0_0)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_8_1
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
@slow
def _lowercase ( self : List[str] ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 2_1_8_4_1)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
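# Standalone sketch of the JIT-consistency pattern the tests above use: run the
# same function once under jax.jit and once with jit disabled, then compare the
# outputs shape-for-shape and value-for-value.
def _jit_consistency_demo():
    fn = lambda x: jax.numpy.tanh(x ) * 2.0
    x = jax.numpy.ones((2, 3) )
    jitted = jax.jit(fn )(x )
    with jax.disable_jit():
        eager = fn(x )
    assert jitted.shape == eager.shape
    assert jax.numpy.allclose(jitted , eager )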
| 17 | 0 |
'''simple docstring'''
import numpy as np
def lowercase__ ( vector , alpha )-> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
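    # Worked example of the ELU above: positive entries pass through unchanged,
    # negative entries are squashed to alpha * (exp(x) - 1), approaching -alpha.
    print(lowercase__(np.array([-2.0, 0.0, 2.0] ) , 1.0 ))  # approx. [-0.865  0.  2.]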
| 358 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger('transformers.models.speecht5')
def load_weights( checkpoint , hf_model , config )-> List[str]:
hf_model.apply_weight_norm()
UpperCamelCase = checkpoint["""input_conv.weight_g"""]
UpperCamelCase = checkpoint["""input_conv.weight_v"""]
UpperCamelCase = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase = checkpoint[F"upsamples.{i}.1.weight_g"]
UpperCamelCase = checkpoint[F"upsamples.{i}.1.weight_v"]
UpperCamelCase = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
UpperCamelCase = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , )-> List[Any]:
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
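    # Standalone sketch of the weight-norm round trip the conversion relies on:
    # torch's (older-style) weight_norm utility splits a conv weight into
    # weight_g / weight_v — the names carried by the checkpoint keys above —
    # and remove_weight_norm folds them back into a single plain weight.
    from torch.nn.utils import remove_weight_norm, weight_norm

    demo_conv = weight_norm(torch.nn.Conv1d(4, 4, kernel_size=3))
    assert hasattr(demo_conv, 'weight_g') and hasattr(demo_conv, 'weight_v')
    folded = demo_conv.weight.detach().clone()
    remove_weight_norm(demo_conv)
    assert torch.allclose(folded, demo_conv.weight)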
| 183 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
lowerCamelCase_ = 0
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    def __init__( self : Any , vocab_file : Tuple , do_lower_case : Tuple=False , remove_space : Optional[Any]=True , keep_accents : List[Any]=False , bos_token : List[Any]="<s>" , eos_token : Optional[Any]="</s>" , unk_token : Dict="<unk>" , sep_token : Tuple="<sep>" , pad_token : List[str]="<pad>" , cls_token : int="<cls>" , mask_token : Dict="<mask>" , additional_special_tokens : Optional[Any]=["<eop>", "<eod>"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self : List[Any] ):
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self : int ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : Union[str, Any] ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self : Any , d : str ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self : Optional[int] , inputs : Optional[int] ):
        '''simple docstring'''
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self : str , text : str ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                    cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
return self.sp_model.PieceToId(__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
return self.sp_model.IdToPiece(__UpperCAmelCase )
    def convert_tokens_to_string( self : str , tokens : Dict ):
        '''simple docstring'''
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
def lowerCAmelCase ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = None , __UpperCAmelCase : bool = True , **__UpperCAmelCase : List[Any] , ):
'''simple docstring'''
_A = kwargs.pop("use_source_tokenizer" , __UpperCAmelCase )
_A = self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_A = []
_A = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
_A = []
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_A = "".join(__UpperCAmelCase )
_A = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_A = self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def save_vocabulary( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
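# Pure-Python sketch of the special-token layout the methods above encode:
# pairs are laid out as "A <sep> B <sep> <cls>", with the CLS segment id (2)
# placed at the very end rather than at the front as in BERT-style models.
def _xlnet_layout_demo(tokens_a , tokens_b ):
    sep , cls = ["<sep>"] , ["<cls>"]
    input_tokens = tokens_a + sep + tokens_b + sep + cls
    token_types = len(tokens_a + sep ) * [0] + len(tokens_b + sep ) * [1] + [2]
    return input_tokens , token_types

print(_xlnet_layout_demo(["hello"] , ["world"] ))
# (['hello', '<sep>', 'world', '<sep>', '<cls>'], [0, 0, 1, 1, 2])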
| 79 |
'''simple docstring'''
def solution( n = 1000 )-> int:
UpperCamelCase = -1
UpperCamelCase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
UpperCamelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
UpperCamelCase = n - a - b
if c * c == (a * a + b * b):
UpperCamelCase = a * b * c
if candidate >= product:
UpperCamelCase = candidate
return product
if __name__ == "__main__":
print(f'{solution() = }')
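    # Worked check for a small perimeter: with n = 12 the only integer right
    # triangle is (3, 4, 5), so the maximised product a*b*c is 60. The two
    # defining equations a^2 + b^2 = c^2 and a + b + c = n are solved for b,
    # eliminating c.
    print(solution(12))  # 60 (from the 3-4-5 triangle)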
| 321 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits : int = 3 ):
    if not isinstance(number_of_qubits , int ):
        raise TypeError("number of qubits must be an integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10)." )
    qr = QuantumRegister(number_of_qubits , "qr" )
    cr = ClassicalRegister(number_of_qubits , "cr" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
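    # Numpy cross-check of what the circuit implements: the n-qubit QFT unitary
    # equals the unitary DFT matrix F[j, k] = omega**(j*k) / sqrt(N) with N = 2**n.
    N = 2 ** 3
    omega = np.exp(2j * np.pi / N)
    qft_matrix = np.array([[omega ** (j * k) for k in range(N)] for j in range(N)]) / np.sqrt(N)
    assert np.allclose(qft_matrix @ qft_matrix.conj().T, np.eye(N))  # unitary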
| 42 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet( hor : int ):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
    state_dict = model.state_dict()
    config = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 65_536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
    hf_value_function = UNetaDModel(**config )
    print(F"length of state dict: {len(state_dict.keys() )}" )
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
    with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json" , "w" ) as f:
        json.dump(config , f )
def value_function():
    config = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 65_536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
    state_dict = model
    hf_value_function = UNetaDModel(**config )
    print(F"length of state dict: {len(state_dict.keys() )}" )
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
    with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
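    # Standalone sketch of the key-renaming trick used above: zipping the two
    # state dicts' key lists assumes both modules enumerate parameters in the
    # same order, which is why both lengths are printed before mapping.
    src = {'blocks.0.w': 1, 'blocks.0.b': 2}
    dst_keys = ['layers.0.weight', 'layers.0.bias']
    renamed = {new: src[old] for old, new in zip(src.keys() , dst_keys )}
    print(renamed )  # {'layers.0.weight': 1, 'layers.0.bias': 2}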
| 42 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data : int
    left : TreeNode | None = None
    right : TreeNode | None = None
CoinsDistribResult = namedtuple('''CoinsDistribResult''', '''moves excess''')
def distribute_coins( root : TreeNode | None ):
    if root is None:
        return 0
    # Validation
    def count_nodes(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('''The nodes number should be same as the number of coins''' )
    # Main calculation
    def get_distrib(node : TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
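    # Worked example of distribute_coins on the classic three-node tree where
    # the right child holds all three coins: it pushes two coins up and one of
    # them travels on to the left child, for 3 moves in total.
    #      0
    #     / \
    #    0   3
    example_root = TreeNode(0 , TreeNode(0 ) , TreeNode(3 ) )
    print(distribute_coins(example_root ))  # 3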
| 33 |
def binary_and( a , b ) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be non-negative' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
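    # Worked example: 25 = 0b11001 and 27 = 0b11011 share the bits 0b11001,
    # matching Python's built-in & operator.
    print(binary_and(25 , 27 ))  # 0b11001
    print(bin(25 & 27 ))         # 0b11001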
| 99 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger()
@dataclass
class Tracker:
    '''simple docstring'''
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook( self : str , m : Union[str, Any] , inputs : Tensor , outputs : Tensor ) -> int:
        '''simple docstring'''
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Convad ) or isinstance(m , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Dict:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(SCREAMING_SNAKE_CASE_ )
[x.remove() for x in self.handles]
return self
@property
    def parametrized( self : Optional[Any] ):
        '''simple docstring'''
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    '''simple docstring'''
    src : nn.Module
    dest : nn.Module
    verbose : int = 0
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
    def __call__( self : Any , x : Tensor ):
        '''simple docstring'''
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced )} operations while"""
                f""" destination module has {len(dest_traced )}.""" )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""" )
def convert_weight_and_push( name , config , save_directory , push_to_hub = True ) -> Any:
    print(F"""Converting {name}...""" )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 2_2_4, 2_2_4) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F"""resnet{'-'.join(name.split('resnet' ) )}"""
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=True , )
        print(F"""Pushed {checkpoint_name}""" )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ) -> List[Any]:
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1_0_0_0
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid )
    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
}
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCamelCase = parser.parse_args()
UpperCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
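    # Standalone sketch of the Tracker pattern used above: a forward hook is
    # registered on every submodule, leaf modules are recorded in execution
    # order during one forward pass, and the hooks are removed afterwards.
    traced, handles = [], []
    demo = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten())
    def demo_hook(module, inputs, output):
        if len(list(module.modules())) == 1:  # leaf module
            traced.append(module)
    for m in demo.modules():
        handles.append(m.register_forward_hook(demo_hook))
    demo(torch.randn(1, 3, 8, 8))
    for h in handles:
        h.remove()
    print([type(m).__name__ for m in traced])  # ['Conv2d', 'ReLU', 'Flatten']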
| 359 |
'''simple docstring'''
from __future__ import annotations
def make_matrix( row_size : int = 4 ) -> list[list[int]]:
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_aa( matrix ) -> list[list[int]]:
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180( matrix ) -> list[list[int]]:
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270( matrix ) -> list[list[int]]:
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose( matrix ) -> list[list[int]]:
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row( matrix ) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column( matrix ) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix( matrix ) -> None:
    for i in matrix:
        print(*i )
if __name__ == "__main__":
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 90 counterclockwise:\n''')
    print_matrix(rotate_aa(matrix))
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 180:\n''')
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 270 counterclockwise:\n''')
    print_matrix(rotate_270(matrix))
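    # Quick check of the identities above on a 2x2 matrix: rotating 90 degrees
    # counterclockwise is reverse_row(transpose(m)).
    print(rotate_aa([[1, 2], [3, 4]]))  # [[2, 4], [1, 3]]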
| 334 | 0 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class SCREAMING_SNAKE_CASE ( Generic[_T] ):
"""simple docstring"""
def __init__( self : str , UpperCamelCase__ : Iterable[_T] | None = None ):
"""simple docstring"""
UpperCamelCase = list(iterable or [] )
UpperCamelCase = []
def __len__( self : Optional[int] ):
"""simple docstring"""
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Optional[Any] ):
"""simple docstring"""
return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})"""
def A ( self : List[Any] , UpperCamelCase__ : _T ):
"""simple docstring"""
self._stacka.append(UpperCamelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self._stacka.pop
UpperCamelCase = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('Queue is empty' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
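    # Usage sketch of the two-stack queue above: pushes go to the inbox stack,
    # and the outbox is only refilled (reversing order) when it runs empty, so
    # each element moves at most twice — amortised O(1) per operation.
    queue = QueueByTwoStacks([1, 2, 3] )
    queue.put(4 )
    print(len(queue ) )   # 4
    print(queue.get() )   # 1
    print(queue.get() )   # 2
    queue.put(5 )
    print(queue.get() )   # 3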
| 28 |
# Imports
import numpy as np
class _lowercase :
'''simple docstring'''
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        '''simple docstring'''
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        '''simple docstring'''
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def _lowerCamelCase ( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        '''simple docstring'''
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def _lowerCamelCase ( self ):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci ( self ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
    def cvi ( self ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
    def gli ( self ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
    def ndvi ( self ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
    def bndvi ( self ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi ( self ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi ( self ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi ( self ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
    def grndvi ( self ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
    def rbndvi ( self ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi ( self ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi ( self , a=0.08 , b=1.22 , x=0.03 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
    def bwdrvi ( self ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green ( self ):
'''simple docstring'''
return (self.nir / self.green) - 1
    def ci_rededge ( self ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
    def ci ( self ):
'''simple docstring'''
return (self.red - self.blue) / self.red
    def ctvi ( self ):
        '''simple docstring'''
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi ( self ):
'''simple docstring'''
return self.nir - self.green
    def evi ( self ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
    def gemi ( self ):
        '''simple docstring'''
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi ( self , y=0.16 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi ( self , n=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue ( self ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi ( self , a=None , b=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
    def ipvi ( self ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i ( self ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
    def rvi ( self ):
'''simple docstring'''
return self.nir / self.red
    def mrvi ( self ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi ( self ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
    def norm_g ( self ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
    def norm_nir ( self ):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
    def norm_r ( self ):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
    def ngrdi ( self ):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
    def ri ( self ):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
    def s ( self ):
        '''simple docstring'''
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
    def _if ( self ):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi ( self ):
'''simple docstring'''
return self.nir / self.red
    def tvi ( self ):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre ( self ):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
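if __name__ == "__main__":
    # Illustrative sketch (added, not from the original file): NDVI over a
    # 2x2 patch with made-up reflectance values; the other indices dispatch
    # the same way through calculation().
    nir_band = np.array([[0.8, 0.7], [0.6, 0.9]])
    red_band = np.array([[0.2, 0.3], [0.1, 0.2]])
    indices = _lowercase(red=red_band, nir=nir_band)
    print(indices.calculation("NDVI"))  # elementwise (nir - red) / (nir + red)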
| 128 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
else:
unused_weights.append(_lowerCAmelCase)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0_2_2_4, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
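    # Example invocation (a sketch; all paths below are placeholders and the
    # script filename is assumed):
    #
    #   python convert_wav2vec2_seq2seq_checkpoint.py \
    #       --checkpoint_path /path/to/checkpoint_best.pt \
    #       --dict_path /path/to/dict.ltr.txt \
    #       --pytorch_dump_folder_path ./converted-model
    #
    # The remaining arguments fall back to the defaults declared above.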
| 353 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
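if __name__ == "__main__":
    # Minimal usage sketch (added for illustration): project features to
    # Student-T parameters and sample from the resulting distribution.
    student_t_output = StudentTOutput(dim=1)
    projection = student_t_output.get_parameter_projection(in_features=32)
    features = torch.randn(4, 32)      # e.g. per-step decoder states
    distr_args = projection(features)  # (df, loc, scale) after domain_map
    distribution = student_t_output.distribution(distr_args)
    print(distribution.sample().shape)  # torch.Size([4])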
| 9 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/rembert""": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )

    @property
    def vocab_size( self ):
        return len(self.sp_model )

    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    def _tokenize( self , text , sample=False ):
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces

    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self , tokens ):
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
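# Layout sketch (added for illustration): for a pair of sequences A and B,
# build_inputs_with_special_tokens produces
#     [CLS] A [SEP] B [SEP]
# and create_token_type_ids_from_sequences marks the segments as
#     0 ... 0        1 ... 1
# (zeros cover "[CLS] A [SEP]", ones cover "B [SEP]").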
| 55 |
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    '''simple docstring'''
    start_i = 0
    end_i = len(s ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def is_palindrome_traversal(s: str) -> bool:
    '''simple docstring'''
    end = len(s ) // 2
    n = len(s )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end ) )
def is_palindrome_recursive(s: str) -> bool:
    '''simple docstring'''
    if len(s ) <= 2:
        return True
    if s[0] == s[len(s ) - 1]:
        return is_palindrome_recursive(s[1:-1] )
else:
return False
def is_palindrome_slice(s: str) -> bool:
    '''simple docstring'''
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    '''simple docstring'''
    stmt = F'''all({name}(key) is value for key, value in test_data.items())'''
    setup = F'''from __main__ import test_data, {name}'''
    number = 500000
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 346 | 0 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png") | 367 |
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def luhn_validation(credit_card_number: str) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    '''simple docstring'''
    error_message = F'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(F'{error_message} it has nonnumerical characters.' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F'{error_message} of its length.' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F'{error_message} of its first two digits.' )
        return False
    if not luhn_validation(credit_card_number ):
        print(F'{error_message} it fails the Luhn check.' )
        return False
    print(F'{credit_card_number} is a valid credit card number.' )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323") | 31 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
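    # Illustrative check (added): every node of G is reachable from "A".
    assert depth_first_search(G, "A") == set(G)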
| 257 |
"""simple docstring"""
def lucas_lehmer_test(p: int) -> bool:
    """simple docstring"""
    if p < 2:
        raise ValueError("p should not be less than 2!" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
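    # Illustrative extra (added): scan small odd exponents for Mersenne
    # primes 2**p - 1; among these, 3, 5, 7, 13, 17, 19 and 31 should pass.
    for exponent in (3, 5, 7, 11, 13, 17, 19, 23, 29, 31):
        if lucas_lehmer_test(exponent):
            print(f"2**{exponent} - 1 is a Mersenne prime")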
| 153 | 0 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> int:
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}")
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 302 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
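# Usage note (illustrative): with the lazy module installed in sys.modules,
# `from transformers.models.efficientnet import EfficientNetModel` defers the
# heavy torch import until the attribute is first resolved.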
| 302 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase : Any =False
class __a ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a ( unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 | 189 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Map teacher (BERT) parameter names onto the DistilBERT student layout.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict['cls.predictions.decoder.weight']
    compressed_sd["vocab_projector.bias"] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f"N layers selected for distillation: {std_idx}")
print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
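    # Example invocation (a sketch; the script filename and paths are
    # placeholders):
    #
    #   python extract_distilbert.py \
    #       --model_type bert \
    #       --model_name bert-base-uncased \
    #       --dump_checkpoint ./distilbert_init.pth \
    #       --vocab_transform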
| 346 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8_192)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]])
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1_000)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21_841)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2_396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 367 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''bert_for_seq_generation''': (
            '''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''bert_for_seq_generation''': 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
UpperCAmelCase_ : List[str] = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.__dict__.copy()
UpperCAmelCase_ : List[Any] = None
return state
def __setstate__( self : Dict , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
UpperCAmelCase_ : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ : Any = {}
UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : str ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict:
return self.sp_model.piece_to_id(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : int ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.sp_model.IdToPiece(lowerCAmelCase_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Tuple = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
UpperCAmelCase_ : Tuple = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , "wb" ) as fi:
UpperCAmelCase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
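
# Usage sketch (not part of the original module): the tokenizer is loaded
# through the standard `from_pretrained` API; the checkpoint name is the one
# listed in PRETRAINED_VOCAB_FILES_MAP above.
#
#     from transformers import BertGenerationTokenizer
#
#     tokenizer = BertGenerationTokenizer.from_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder"
#     )
#     ids = tokenizer("Hello world").input_ids
#     text = tokenizer.decode(ids)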
| 253 | 0 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batched, differentiable, standard pinhole camera.
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """
        :return: integer pixel coords of shape (width * height, 2)
        """
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coords to [-1, 1], then scale by the tangent of the half fov
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # keep the batch shape of the original camera
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
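
# Usage sketch (assumes the class as reconstructed above): create_pan_cameras
# builds 20 cameras orbiting the origin; the batched ray bundle used for
# rendering comes from the `camera_rays` property.
#
#     cameras = create_pan_cameras(64)   # width = height = 64
#     rays = cameras.camera_rays         # shape: [1, 20 * 64 * 64, 2, 3]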
| 73 |
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's law: exactly one of the three arguments must be 0, and the
    function solves for that missing quantity from the other two.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
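
# Worked example: with current = 2 A and resistance = 3 Ω, the missing
# quantity is the voltage, V = I * R = 6 V:
#
#     >>> ohms_law(voltage=0, current=2, resistance=3)
#     {'voltage': 6.0}
#     >>> ohms_law(voltage=10, current=0, resistance=5)
#     {'current': 2.0}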
| 61 | 0 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
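
# Worked example: value = [60, 100, 120], weight = [10, 20, 30], capacity = 50.
# The value/weight ratios are 6, 5 and 4, so items 0 and 1 are taken whole
# (30 weight, 160 value) and 2/3 of item 2 fills the remaining 20 (+80):
#
#     >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#     (240.0, [1, 1, 0.6666666666666666])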
| 127 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = -1
SCREAMING_SNAKE_CASE_: Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: int = TextStreamer(lowerCAmelCase__)
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Union[str, Any] = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = -1
SCREAMING_SNAKE_CASE_: int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = tokenizer.decode(greedy_ids[0])
SCREAMING_SNAKE_CASE_: int = TextIteratorStreamer(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE_: Tuple = Thread(target=model.generate , kwargs=lowerCAmelCase__)
thread.start()
SCREAMING_SNAKE_CASE_: Optional[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = -1
SCREAMING_SNAKE_CASE_: Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Dict = TextStreamer(lowerCAmelCase__ , skip_prompt=lowerCAmelCase__)
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Any = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
SCREAMING_SNAKE_CASE_: Tuple = AutoTokenizer.from_pretrained("distilgpt2")
SCREAMING_SNAKE_CASE_: List[str] = AutoModelForCausalLM.from_pretrained("distilgpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = -1
SCREAMING_SNAKE_CASE_: List[str] = torch.ones((1, 5) , device=lowerCAmelCase__).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Union[str, Any] = TextStreamer(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
model.generate(lowerCAmelCase__ , max_new_tokens=1 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE_: str = cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE_: Tuple = tokenizer(lowerCAmelCase__ , return_tensors="pt")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = -1
SCREAMING_SNAKE_CASE_: List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = TextIteratorStreamer(lowerCAmelCase__ , timeout=0.001)
SCREAMING_SNAKE_CASE_: Any = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE_: Optional[Any] = Thread(target=model.generate , kwargs=lowerCAmelCase__)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Tuple = ""
for new_text in streamer:
streamer_text += new_text
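
# The pattern exercised above mirrors real streaming use: run generate() on a
# background thread and consume decoded text as it is produced (sketch using
# the same public API as the tests):
#
#     streamer = TextIteratorStreamer(tokenizer)
#     Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer}).start()
#     for chunk in streamer:
#         print(chunk, end="")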
| 127 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak an s3prl checkpoint's weights into the transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__UpperCamelCase : List[str] = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
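
# Example invocation (hypothetical script and file names; the flags are the
# ones defined above):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name facebook/wav2vec2-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_checkpoint.ckpt \
#         --model_dump_path ./converted_model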
| 106 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
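
# Usage sketch via the high-level factory (the CLIP checkpoint below is an
# illustrative choice, not mandated by this file):
#
#     from transformers import pipeline
#
#     classifier = pipeline(
#         "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#     )
#     classifier("cat.png", candidate_labels=["cat", "dog", "bird"])
#     # -> [{"score": ..., "label": "cat"}, ...] sorted by descending score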
| 16 | 0 |
"""simple docstring"""
import argparse
import os
import re
SCREAMING_SNAKE_CASE__:Optional[int] = """src/diffusers"""
# Pattern that looks at the indentation in a line.
SCREAMING_SNAKE_CASE__:Union[str, Any] = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
SCREAMING_SNAKE_CASE__:List[Any] = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
SCREAMING_SNAKE_CASE__:Optional[Any] = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
SCREAMING_SNAKE_CASE__:Union[str, Any] = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
SCREAMING_SNAKE_CASE__:Any = re.compile(R"""\[([^\]]+)\]""")
def _lowerCamelCase( a ):
__a = _re_indent.search(a )
return "" if search is None else search.groups()[0]
def _lowerCamelCase( a , a="" , a=None , a=None ):
__a = 0
__a = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(a ):
index += 1
__a = ["\n".join(lines[:index] )]
else:
__a = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__a = [lines[index]]
index += 1
while index < len(a ) and (end_prompt is None or not lines[index].startswith(a )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(a ) )
if index < len(a ) - 1:
__a = [lines[index + 1]]
index += 1
else:
__a = []
else:
blocks.append("\n".join(a ) )
__a = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a ) > 0:
blocks.append("\n".join(a ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def _lowerCamelCase( a ):
def _inner(a ):
return key(a ).lower().replace("_" , "" )
return _inner
def _lowerCamelCase( a , a=None ):
# If no key is provided, we use a noop.
def noop(a ):
return x
if key is None:
__a = noop
# Constants are all uppercase, they go first.
__a = [obj for obj in objects if key(a ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__a = [obj for obj in objects if key(a )[0].isupper() and not key(a ).isupper()]
# Functions begin with a lowercase, they go last.
__a = [obj for obj in objects if not key(a )[0].isupper()]
__a = ignore_underscore(a )
return sorted(a , key=a ) + sorted(a , key=a ) + sorted(a , key=a )
def _lowerCamelCase( a ):
# This inner function sort imports between [ ].
def _replace(a ):
__a = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
__a = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__a = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(a )] ) + "]"
__a = import_statement.split("\n" )
if len(a ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__a = 2 if lines[1].strip() == "[" else 1
__a = [(i, _re_strip_line.search(a ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__a = sort_objects(a , key=lambda a : x[1] )
__a = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(a ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__a = _re_bracket_content.sub(_replace , lines[1] )
else:
__a = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__a = keys[:-1]
__a = get_indent(lines[1] ) + ", ".join([F"\"{k}\"" for k in sort_objects(a )] )
return "\n".join(a )
else:
# Finally we have to deal with imports fitting on one line
__a = _re_bracket_content.sub(_replace , a )
return import_statement
def _lowerCamelCase( a , a=True ):
with open(a , "r" ) as f:
__a = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__a = split_code_in_indented_blocks(
a , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(a ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__a = main_blocks[block_idx]
__a = block.split("\n" )
# Get to the start of the imports.
__a = 0
while line_idx < len(a ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__a = len(a )
else:
line_idx += 1
if line_idx >= len(a ):
continue
# Ignore beginning and last line: they don't contain anything.
__a = "\n".join(block_lines[line_idx:-1] )
__a = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
__a = split_code_in_indented_blocks(a , indent_level=a )
# We have two categories of import key: list or _import_structure[key].append/extend
__a = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__a = [(pattern.search(a ).groups()[0] if pattern.search(a ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__a = [(i, key) for i, key in enumerate(a ) if key is not None]
__a = [x[0] for x in sorted(a , key=lambda a : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__a = 0
__a = []
for i in range(len(a ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__a = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(a )
count += 1
# And we put our main block back together with its first and last line.
__a = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(a ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(a , "w" ) as f:
f.write("\n".join(a ) )
def _lowerCamelCase( a=True ):
__a = []
for root, _, files in os.walk(a ):
if "__init__.py" in files:
__a = sort_imports(os.path.join(a , "__init__.py" ) , check_only=a )
if result:
__a = [os.path.join(a , "__init__.py" )]
if len(a ) > 0:
raise ValueError(F"Would overwrite {len(a )} files, run `make style`." )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:str = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
SCREAMING_SNAKE_CASE__:Any = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
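
# Illustration (not part of the script): given an unsorted one-line entry such as
#
#     _import_structure["models.vae"] = ["UNetModel", "AutoencoderKL"]
#
# sort_objects_in_import rewrites the bracketed list with constants first,
# then classes, then functions, each group alphabetized ignoring underscores:
#
#     _import_structure["models.vae"] = ["AutoencoderKL", "UNetModel"]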
| 268 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for freq in letter_to_freq.values()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # Within one frequency bucket, break ties by reverse ETAOIN order.
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """
    Scores how closely the letter frequencies of `message` match typical
    English: +1 for each of the six most/least common English letters that
    also appears among the message's six most/least common letters.
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
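
# Usage sketch: english_freq_match_score returns a value in 0..12; ordinary
# English prose scores high while ciphertext with scrambled letter frequencies
# scores low, which is what makes the score useful when breaking simple
# substitution ciphers (the file name below is hypothetical):
#
#     score = english_freq_match_score(open("candidate_plaintext.txt").read())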
| 268 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which
    defines the cumulative product of (1 - beta) over time from t = 0 to 1.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
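
# Worked example: betas_for_alpha_bar(1000) returns a length-1000 float32
# tensor. For the default cosine transform, beta_t = 1 - alpha_bar(t2)/alpha_bar(t1)
# grows with t and is clipped at max_beta = 0.999 near the end of the schedule:
#
#     betas = betas_for_alpha_bar(1000)
#     assert betas.shape == (1000,) and betas.max() <= 0.999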
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
@register_to_config
def __init__(self : str , snake_case__ : int = 10_00 , snake_case__ : str = "fixed_small_log" , snake_case__ : bool = True , snake_case__ : Optional[float] = 1.0 , snake_case__ : str = "epsilon" , snake_case__ : str = "squaredcos_cap_v2" , ) -> List[str]:
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
snake_case : Union[str, Any] = betas_for_alpha_bar(snake_case__ )
snake_case : str = 1.0 - self.betas
snake_case : Optional[Any] = torch.cumprod(self.alphas , dim=0 )
snake_case : str = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
snake_case : Optional[Any] = 1.0
# setable values
snake_case : Optional[Any] = None
snake_case : List[str] = torch.from_numpy(np.arange(0 , snake_case__ )[::-1].copy() )
snake_case : str = variance_type
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : torch.FloatTensor , snake_case__ : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : int , snake_case__ : Union[str, torch.device] = None ) -> int:
'''simple docstring'''
snake_case : int = num_inference_steps
snake_case : Tuple = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
snake_case : Union[str, Any] = (np.arange(0 , snake_case__ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
snake_case : List[str] = torch.from_numpy(snake_case__ ).to(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Dict , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : Tuple=None ) -> Union[str, Any]:
'''simple docstring'''
if prev_timestep is None:
snake_case : Optional[int] = t - 1
snake_case : Union[str, Any] = self.alphas_cumprod[t]
snake_case : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
snake_case : Union[str, Any] = 1 - alpha_prod_t
snake_case : int = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
snake_case : List[str] = self.betas[t]
else:
snake_case : Any = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
snake_case : str = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
snake_case : int = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
snake_case : Union[str, Any] = torch.log(torch.clamp(snake_case__ , min=1e-20 ) )
snake_case : Any = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
snake_case : Union[str, Any] = variance.log()
snake_case : Union[str, Any] = beta.log()
snake_case : Optional[Any] = (predicted_variance + 1) / 2
snake_case : Dict = frac * max_log + (1 - frac) * min_log
return variance
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Optional[int] = None , snake_case__ : int=None , snake_case__ : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
'''simple docstring'''
snake_case : Tuple = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
snake_case , snake_case : int = torch.split(snake_case__ , sample.shape[1] , dim=1 )
else:
snake_case : List[Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
snake_case : Any = t - 1
snake_case : int = self.alphas_cumprod[t]
snake_case : Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
snake_case : str = 1 - alpha_prod_t
snake_case : Tuple = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
snake_case : int = self.betas[t]
snake_case : Union[str, Any] = self.alphas[t]
else:
snake_case : str = 1 - alpha_prod_t / alpha_prod_t_prev
snake_case : List[str] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
snake_case : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
snake_case : Any = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
snake_case : List[Any] = torch.clamp(
snake_case__ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
snake_case : str = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
snake_case : Tuple = 0
if t > 0:
snake_case : Optional[int] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=snake_case__ , device=model_output.device )
snake_case : str = self._get_variance(
snake_case__ , predicted_variance=snake_case__ , prev_timestep=snake_case__ , )
if self.variance_type == "fixed_small_log":
snake_case : Optional[Any] = variance
elif self.variance_type == "learned_range":
snake_case : Optional[Any] = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
snake_case : Any = variance * variance_noise
snake_case : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , snake_case__ : torch.IntTensor , ) -> torch.FloatTensor:
'''simple docstring'''
snake_case : Optional[Any] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
snake_case : List[Any] = timesteps.to(original_samples.device )
snake_case : Optional[int] = alphas_cumprod[timesteps] ** 0.5
snake_case : List[Any] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
snake_case : Tuple = sqrt_alpha_prod.unsqueeze(-1 )
snake_case : List[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
snake_case : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
snake_case : Optional[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
snake_case : List[Any] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
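
# Denoising-loop sketch (how the UnCLIP pipelines drive this scheduler; the
# `unet` call is a hypothetical stand-in for the actual model):
#
#     scheduler.set_timesteps(25, device="cpu")
#     for t in scheduler.timesteps:
#         noise_pred = unet(latents, t).sample                 # hypothetical model call
#         latents = scheduler.step(noise_pred, t, latents).prev_sample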
| 59 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
a_ =StableDiffusionControlNetImgaImgPipeline
a_ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
a_ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ =IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
a_ =IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : Union[str, Any] ) -> List[str]:
torch.manual_seed(0 )
__lowerCamelCase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
__lowerCamelCase : Any = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
__lowerCamelCase : Any = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
__lowerCamelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowerCamelCase : Union[str, Any] = CLIPTextModel(_a )
__lowerCamelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowerCamelCase : int = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowercase ( self : Union[str, Any] , _a : Any , _a : List[str]=0 ) -> List[str]:
if str(_a ).startswith('mps' ):
__lowerCamelCase : Any = torch.manual_seed(_a )
else:
__lowerCamelCase : Any = torch.Generator(device=_a ).manual_seed(_a )
__lowerCamelCase : Union[str, Any] = 2
__lowerCamelCase : Any = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , )
__lowerCamelCase : Optional[Any] = floats_tensor(control_image.shape , rng=random.Random(_a ) ).to(_a )
__lowerCamelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase : int = Image.fromarray(np.uinta(_a ) ).convert('RGB' ).resize((64, 64) )
__lowerCamelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _lowercase ( self : List[str] ) -> Optional[Any]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowercase ( self : str ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _lowercase ( self : Any ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
a_ =StableDiffusionControlNetImgaImgPipeline
a_ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
a_ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ =frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _lowercase ( self : List[Any] ) -> List[str]:
torch.manual_seed(0 )
__lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)
__lowerCamelCase : Any = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_a )
torch.manual_seed(0 )
__lowerCamelCase : Any = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_a )
torch.manual_seed(0 )
__lowerCamelCase : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
__lowerCamelCase : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__lowerCamelCase : Dict = CLIPTextModel(_a )
__lowerCamelCase : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowerCamelCase : List[str] = MultiControlNetModel([controlneta, controlneta] )
__lowerCamelCase : str = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowercase ( self : Dict , _a : int , _a : Union[str, Any]=0 ) -> Union[str, Any]:
if str(_a ).startswith('mps' ):
__lowerCamelCase : Optional[Any] = torch.manual_seed(_a )
else:
__lowerCamelCase : Union[str, Any] = torch.Generator(device=_a ).manual_seed(_a )
__lowerCamelCase : Any = 2
__lowerCamelCase : Any = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , ),
]
__lowerCamelCase : List[Any] = floats_tensor(control_image[0].shape , rng=random.Random(_a ) ).to(_a )
__lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase : Dict = Image.fromarray(np.uinta(_a ) ).convert('RGB' ).resize((64, 64) )
__lowerCamelCase : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _lowercase ( self : Tuple ) -> Union[str, Any]:
__lowerCamelCase : Any = self.get_dummy_components()
__lowerCamelCase : Any = self.pipeline_class(**_a )
pipe.to(_a )
__lowerCamelCase : Optional[int] = 10.0
__lowerCamelCase : Tuple = 4
__lowerCamelCase : str = self.get_dummy_inputs(_a )
__lowerCamelCase : int = steps
__lowerCamelCase : List[Any] = scale
__lowerCamelCase : Optional[Any] = pipe(**_a )[0]
__lowerCamelCase : Dict = self.get_dummy_inputs(_a )
__lowerCamelCase : List[str] = steps
__lowerCamelCase : List[Any] = scale
__lowerCamelCase : Dict = pipe(**_a , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
__lowerCamelCase : Any = self.get_dummy_inputs(_a )
__lowerCamelCase : Union[str, Any] = steps
__lowerCamelCase : Any = scale
__lowerCamelCase : Optional[int] = pipe(**_a , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
__lowerCamelCase : Any = self.get_dummy_inputs(_a )
__lowerCamelCase : Tuple = steps
__lowerCamelCase : List[Any] = scale
__lowerCamelCase : List[str] = pipe(**_a , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def _lowercase ( self : Optional[Any] ) -> List[str]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowercase ( self : Dict ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _lowercase ( self : str ) -> int:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def _lowercase ( self : List[Any] ) -> str:
__lowerCamelCase : int = self.get_dummy_components()
__lowerCamelCase : List[Any] = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_a )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : List[str] ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
__lowerCamelCase : Optional[Any] = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
__lowerCamelCase : List[str] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=_a , controlnet=_a )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_a )
__lowerCamelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
__lowerCamelCase : List[Any] = 'evil space-punk bird'
__lowerCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
__lowerCamelCase : Any = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
__lowerCamelCase : Dict = pipe(
_a , _a , control_image=_a , generator=_a , output_type='np' , num_inference_steps=50 , strength=0.6 , )
__lowerCamelCase : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
__lowerCamelCase : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9e-2
| 208 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a = logging.get_logger(__name__) # pylint: disable=invalid-name
a = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
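
# Worked example (scale_factor=8): a 512x512 request gives 512 // 64 = 8
# latent cells per side, so the function returns (64, 64); a non-multiple such
# as 520 rounds up to the next cell: (520 // 64 + 1) * 8 = 72.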
class KandinskyV22Pipeline(DiffusionPipeline):
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : DDPMScheduler , _UpperCAmelCase : VQModel , ):
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ):
if latents is None:
_A = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A = latents.to(lowercase_ )
_A = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_A = torch.device(F'''cuda:{gpu_id}''' )
_A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
_A = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
_A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self : Dict ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
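# Hedged usage sketch (added; checkpoint names and the prior API are assumptions based on
# public Kandinsky 2.2 examples, not taken from this file):
#
#   import torch
#   from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline
#
#   prior = KandinskyV22PriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
#   image_embeds, negative_image_embeds = prior("a red cat, 4k photo").to_tuple()
#
#   pipe = KandinskyV22Pipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
#   image = pipe(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds,
#                height=768, width=768).images[0]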
| 371 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password of length `i` that is guaranteed to contain `chars_incl`."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Pick `i` random characters from `chars_incl`."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl: str, i: int) -> str:
    pass  # Put your code here...


def random_letters(chars_incl: str, i: int) -> str:
    pass  # Put your code here...


def random_characters(chars_incl: str, i: int) -> str:
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Check that a password is long enough and mixes character classes."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
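# Illustrative session (added; actual output is random, shown only for shape):
#   Please indicate the max length of your password: 12
#   Password generated: k3&Zq!v9@Lp2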
| 271 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
def _lowerCamelCase ( self: int , __lowerCamelCase: "ProcessorMixin" , __lowerCamelCase: int = -1 , __lowerCamelCase: int = -1 , __lowerCamelCase: bool = False , __lowerCamelCase: Optional["TensorType"] = None , __lowerCamelCase: int = 3 , __lowerCamelCase: int = 40 , __lowerCamelCase: int = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , "apply_ocr" , _A )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__UpperCAmelCase : str = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__UpperCAmelCase : Tuple = processor.tokenizer.num_special_tokens_to_add(_A )
__UpperCAmelCase : Tuple = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
__UpperCAmelCase : int = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__UpperCAmelCase : Tuple = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__UpperCAmelCase : List[Any] = self._generate_dummy_images(_A , _A , _A , _A )
__UpperCAmelCase : int = dict(
processor(
_A , text=_A , boxes=_A , return_tensors=_A , ) )
return inputs
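# Hedged usage sketch (added; the surrounding workflow is an assumption based on the
# generic transformers.onnx export path, not taken from this file):
#
#   from transformers import AutoProcessor, LayoutLMv3Config
#
#   config = LayoutLMv3Config.from_pretrained("microsoft/layoutlmv3-base")
#   onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)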
| 157 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _lowercase (self : Union[str, Any]) -> Optional[Any]:
__snake_case : int = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
self.assertIsInstance(_A , _A)
def _lowercase (self : Any) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Union[str, Any] = Path(_A) / 'preprocessor_config.json'
__snake_case : Optional[Any] = Path(_A) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Tuple = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : List[Any]) -> List[str]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[Any] = Path(_A) / 'preprocessor_config.json'
__snake_case : Dict = Path(_A) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Any = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : List[str]) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : int = CLIPConfig()
            # Create a dummy config file with image_processor_type
__snake_case : int = Path(_A) / 'preprocessor_config.json'
__snake_case : Dict = Path(_A) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__snake_case : List[Any] = AutoImageProcessor.from_pretrained(_A).to_dict()
config_dict.pop('image_processor_type')
__snake_case : Dict = CLIPImageProcessor(**_A)
# save in new folder
model_config.save_pretrained(_A)
config.save_pretrained(_A)
__snake_case : Tuple = AutoImageProcessor.from_pretrained(_A)
# make sure private variable is not incorrectly saved
__snake_case : Optional[int] = json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(_A , _A)
def _lowercase (self : Union[str, Any]) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : int = Path(_A) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
__snake_case : Any = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : List[Any]) -> str:
with self.assertRaisesRegex(
_A , 'clip-base is not a local folder and is not a valid model identifier'):
__snake_case : Tuple = AutoImageProcessor.from_pretrained('clip-base')
def _lowercase (self : Optional[int]) -> int:
with self.assertRaisesRegex(
_A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__snake_case : int = AutoImageProcessor.from_pretrained(_A , revision='aaaaaa')
def _lowercase (self : Tuple) -> Optional[int]:
with self.assertRaisesRegex(
_A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__snake_case : Any = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
def _lowercase (self : Tuple) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_A):
__snake_case : Optional[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A):
__snake_case : Any = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
__snake_case : Any = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A)
__snake_case : Dict = AutoImageProcessor.from_pretrained(_A , trust_remote_code=_A)
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor')
def _lowercase (self : int) -> str:
try:
AutoConfig.register('custom' , _A)
AutoImageProcessor.register(_A , _A)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A):
AutoImageProcessor.register(_A , _A)
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Optional[Any] = Path(_A) / 'preprocessor_config.json'
__snake_case : Optional[Any] = Path(_A) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Tuple = CustomImageProcessor.from_pretrained(_A)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A)
__snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : int) -> str:
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True
try:
AutoConfig.register('custom' , _A)
AutoImageProcessor.register(_A , _A)
# If remote code is not set, the default is to use local
__snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
__snake_case : Tuple = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
__snake_case : Tuple = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(not hasattr(_A , 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
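# Hedged sketch (added) of the registration pattern the tests above exercise:
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   # After registration, the Auto API resolves the custom class like any built-in one;
#   # `saved_dir` is an illustrative local checkpoint directory:
#   image_processor = AutoImageProcessor.from_pretrained(saved_dir)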
| 172 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
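# Hedged usage sketch (added; the image path is illustrative, not taken from this file):
#
#   from PIL import Image
#   from transformers import BlipImageProcessor
#
#   processor = BlipImageProcessor()  # defaults: 384x384 resize, CLIP mean/std
#   pixel_values = processor(Image.open("cat.png"), return_tensors="pt").pixel_values
#   # pixel_values has shape (1, 3, 384, 384)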
| 355 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(max_number: int = 10**8) -> int:
    """Count the semiprimes (products of exactly two primes) below max_number,
    using a two-pointer sweep over the sorted prime list."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
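# Why the two-pointer count works (comment added for clarity): the primes are sorted,
# so for a fixed smaller factor prime_numbers[left], every prime_numbers[j] with
# left <= j <= right yields a distinct product below max_number, contributing
# exactly right - left + 1 semiprimes to the total.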
| 211 | 0 |
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with blocks of minimum
    length three, separated by at least one empty cell (Project Euler 114).

    >>> solution(7)
    17
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 235 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
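# Hedged usage sketch (added; sizes are illustrative, not taken from this file):
#
#   from transformers import InformerConfig, InformerModel
#
#   config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   model = InformerModel(config)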
| 100 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
'''simple docstring'''
def __init__( self : List[Any] , lowercase__ : str , lowercase__ : Dict=2 , lowercase__ : str=True , lowercase__ : Tuple=False , lowercase__ : int=10 , lowercase__ : Optional[int]=3 , lowercase__ : Tuple=32 * 8 , lowercase__ : List[Any]=32 * 8 , lowercase__ : Optional[int]=4 , lowercase__ : Optional[Any]=64 , ):
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_auxiliary_loss
lowerCAmelCase__ = num_queries
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = min_size
lowerCAmelCase__ = max_size
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = hidden_dim
lowerCAmelCase__ = hidden_dim
def __snake_case ( self : Dict):
'''simple docstring'''
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
lowercase__)
lowerCAmelCase__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase__)
lowerCAmelCase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase__) > 0.5
).float()
lowerCAmelCase__ = (torch.rand((self.batch_size, self.num_labels) , device=lowercase__) > 0.5).long()
lowerCAmelCase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __snake_case ( self : int):
'''simple docstring'''
lowerCAmelCase__ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase__ = self.num_queries
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = [1, 1, 1, 1]
lowerCAmelCase__ = self.num_channels
lowerCAmelCase__ = 64
lowerCAmelCase__ = 128
lowerCAmelCase__ = self.hidden_dim
lowerCAmelCase__ = self.hidden_dim
lowerCAmelCase__ = self.hidden_dim
return config
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __snake_case ( self : int , lowercase__ : Any , lowercase__ : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = output.encoder_hidden_states
lowerCAmelCase__ = output.pixel_decoder_hidden_states
lowerCAmelCase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowercase__) , len(config.backbone_config.depths))
self.parent.assertTrue(len(lowercase__) , len(config.backbone_config.depths))
self.parent.assertTrue(len(lowercase__) , config.decoder_layers)
def __snake_case ( self : List[Any] , lowercase__ : str , lowercase__ : Dict , lowercase__ : Optional[Any] , lowercase__ : Optional[int]=False):
'''simple docstring'''
with torch.no_grad():
lowerCAmelCase__ = MaskaFormerModel(config=lowercase__)
model.to(lowercase__)
model.eval()
lowerCAmelCase__ = model(pixel_values=lowercase__ , pixel_mask=lowercase__)
lowerCAmelCase__ = model(lowercase__ , output_hidden_states=lowercase__)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(lowercase__ , lowercase__)
def __snake_case ( self : Optional[Any] , lowercase__ : List[Any] , lowercase__ : Tuple , lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = MaskaFormerForUniversalSegmentation(config=lowercase__)
model.to(lowercase__)
model.eval()
def comm_check_on_output(lowercase__ : str):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
lowerCAmelCase__ = model(pixel_values=lowercase__ , pixel_mask=lowercase__)
lowerCAmelCase__ = model(lowercase__)
comm_check_on_output(lowercase__)
lowerCAmelCase__ = model(
pixel_values=lowercase__ , pixel_mask=lowercase__ , mask_labels=lowercase__ , class_labels=lowercase__)
comm_check_on_output(lowercase__)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def __snake_case ( self : List[Any]):
'''simple docstring'''
lowerCAmelCase__ = MaskaFormerModelTester(self)
lowerCAmelCase__ = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__)
def __snake_case ( self : Optional[int]):
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case ( self : Dict):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowercase__ , **lowercase__ , output_hidden_states=lowercase__)
def __snake_case ( self : Any):
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowercase__)
@unittest.skip(reason='Mask2Former does not use inputs_embeds')
def __snake_case ( self : Dict):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method')
def __snake_case ( self : str):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model')
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings')
def __snake_case ( self : str):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def __snake_case ( self : List[Any]):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __snake_case ( self : int):
'''simple docstring'''
pass
def __snake_case ( self : str):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowercase__)
lowerCAmelCase__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase__)
@slow
def __snake_case ( self : int):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase__ = MaskaFormerModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def __snake_case ( self : Any):
'''simple docstring'''
lowerCAmelCase__ = (self.model_tester.min_size,) * 2
lowerCAmelCase__ = {
'pixel_values': torch.randn((2, 3, *size) , device=lowercase__),
'mask_labels': torch.randn((2, 10, *size) , device=lowercase__),
'class_labels': torch.zeros(2 , 10 , device=lowercase__).long(),
}
lowerCAmelCase__ = self.model_tester.get_config()
lowerCAmelCase__ = MaskaFormerForUniversalSegmentation(lowercase__).to(lowercase__)
lowerCAmelCase__ = model(**lowercase__)
self.assertTrue(outputs.loss is not None)
def __snake_case ( self : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowercase__ , **lowercase__ , output_hidden_states=lowercase__)
def __snake_case ( self : str):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowercase__).to(lowercase__)
lowerCAmelCase__ = model(**lowercase__ , output_attentions=lowercase__)
self.assertTrue(outputs.attentions is not None)
def __snake_case ( self : Tuple):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCAmelCase__ = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ = model_class(lowercase__)
model.to(lowercase__)
model.train()
lowerCAmelCase__ = model(lowercase__ , mask_labels=lowercase__ , class_labels=lowercase__).loss
loss.backward()
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(lowercase__).to(lowercase__)
model.train()
lowerCAmelCase__ = model(lowercase__ , mask_labels=lowercase__ , class_labels=lowercase__)
lowerCAmelCase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase__)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def __snake_case ( self : str):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __snake_case ( self : str):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def __snake_case ( self : Dict):
'''simple docstring'''
lowerCAmelCase__ = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(lowercase__)
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(lowercase__ , return_tensors='pt').to(lowercase__)
lowerCAmelCase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowercase__ , (1, 3, 384, 384))
with torch.no_grad():
lowerCAmelCase__ = model(**lowercase__)
lowerCAmelCase__ = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]]).to(lowercase__)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase__ , atol=lowercase__))
lowerCAmelCase__ = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]]).to(lowercase__)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase__ , atol=lowercase__))
lowerCAmelCase__ = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]]).to(lowercase__)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase__ , atol=lowercase__))
def __snake_case ( self : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(lowercase__).eval()
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(lowercase__ , return_tensors='pt').to(lowercase__)
lowerCAmelCase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowercase__ , (1, 3, 384, 384))
with torch.no_grad():
lowerCAmelCase__ = model(**lowercase__)
# masks_queries_logits
lowerCAmelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
lowerCAmelCase__ = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
lowerCAmelCase__ = torch.tensor(lowercase__).to(lowercase__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase__ , atol=lowercase__))
# class_queries_logits
lowerCAmelCase__ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
lowerCAmelCase__ = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
]).to(lowercase__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase__ , atol=lowercase__))
def __snake_case ( self : List[str]):
'''simple docstring'''
lowerCAmelCase__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(lowercase__).eval()
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = image_processor(
[np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='pt' , )
lowerCAmelCase__ = inputs['pixel_values'].to(lowercase__)
lowerCAmelCase__ = [el.to(lowercase__) for el in inputs['mask_labels']]
lowerCAmelCase__ = [el.to(lowercase__) for el in inputs['class_labels']]
with torch.no_grad():
lowerCAmelCase__ = model(**lowercase__)
self.assertTrue(outputs.loss is not None)
| 119 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ['image_processor']
    image_processor_class = 'SamImageProcessor'

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size['longest_edge']
def __call__( self : List[Any] , lowercase__ : Optional[int]=None , lowercase__ : Any=None , lowercase__ : Tuple=None , lowercase__ : List[str]=None , lowercase__ : Optional[Union[str, TensorType]] = None , **lowercase__ : Dict , ):
'''simple docstring'''
lowerCAmelCase__ = self.image_processor(
lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
        # pop arguments that are not used in the forward pass but used nevertheless
lowerCAmelCase__ = encoding_image_processor['original_sizes']
if hasattr(lowercase__ , 'numpy'): # Checks if Torch or TF tensor
lowerCAmelCase__ = original_sizes.numpy()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._check_and_preprocess_points(
input_points=lowercase__ , input_labels=lowercase__ , input_boxes=lowercase__ , )
lowerCAmelCase__ = self._normalize_and_convert(
lowercase__ , lowercase__ , input_points=lowercase__ , input_labels=lowercase__ , input_boxes=lowercase__ , return_tensors=lowercase__ , )
return encoding_image_processor
def __snake_case ( self : Optional[Any] , lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : str=None , lowercase__ : Optional[int]=None , lowercase__ : str=None , lowercase__ : Optional[Any]="pt" , ):
'''simple docstring'''
if input_points is not None:
if len(lowercase__) != len(lowercase__):
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , original_sizes[0]) for point in input_points
]
else:
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , lowercase__)
for point, original_size in zip(lowercase__ , lowercase__)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
lowerCAmelCase__ , lowerCAmelCase__ = self._pad_points_and_labels(lowercase__ , lowercase__)
lowerCAmelCase__ = np.array(lowercase__)
if input_labels is not None:
lowerCAmelCase__ = np.array(lowercase__)
if input_boxes is not None:
if len(lowercase__) != len(lowercase__):
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , original_sizes[0] , is_bounding_box=lowercase__)
for box in input_boxes
]
else:
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , lowercase__ , is_bounding_box=lowercase__)
for box, original_size in zip(lowercase__ , lowercase__)
]
lowerCAmelCase__ = np.array(lowercase__)
if input_boxes is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(lowercase__)
# boxes batch size of 1 by default
lowerCAmelCase__ = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(lowercase__)
# boxes batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(lowercase__ , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes})
if input_points is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(lowercase__ , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({'input_points': input_points})
if input_labels is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(lowercase__ , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels})
return encoding_image_processor
def __snake_case ( self : str , lowercase__ : Optional[int] , lowercase__ : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ = max([point.shape[0] for point in input_points])
lowerCAmelCase__ = []
for i, point in enumerate(lowercase__):
if point.shape[0] != expected_nb_points:
lowerCAmelCase__ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
lowerCAmelCase__ = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(lowercase__)
lowerCAmelCase__ = processed_input_points
return input_points, input_labels
def __snake_case ( self : Optional[Any] , lowercase__ : int , lowercase__ : np.ndarray , lowercase__ : int , lowercase__ : Optional[Any]=False):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = original_size
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor._get_preprocess_shape(lowercase__ , longest_edge=lowercase__)
lowerCAmelCase__ = deepcopy(lowercase__).astype(lowercase__)
if is_bounding_box:
lowerCAmelCase__ = coords.reshape(-1 , 2 , 2)
lowerCAmelCase__ = coords[..., 0] * (new_w / old_w)
lowerCAmelCase__ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowerCAmelCase__ = coords.reshape(-1 , 4)
return coords
def __snake_case ( self : Dict , lowercase__ : Optional[Any]=None , lowercase__ : Tuple=None , lowercase__ : int=None , ):
'''simple docstring'''
if input_points is not None:
if hasattr(lowercase__ , 'numpy'): # Checks for TF or Torch tensor
lowerCAmelCase__ = input_points.numpy().tolist()
if not isinstance(lowercase__ , lowercase__) or not isinstance(input_points[0] , lowercase__):
raise ValueError('Input points must be a list of list of floating points.')
lowerCAmelCase__ = [np.array(lowercase__) for input_point in input_points]
else:
lowerCAmelCase__ = None
if input_labels is not None:
if hasattr(lowercase__ , 'numpy'):
lowerCAmelCase__ = input_labels.numpy().tolist()
if not isinstance(lowercase__ , lowercase__) or not isinstance(input_labels[0] , lowercase__):
raise ValueError('Input labels must be a list of list integers.')
lowerCAmelCase__ = [np.array(lowercase__) for label in input_labels]
else:
lowerCAmelCase__ = None
if input_boxes is not None:
if hasattr(lowercase__ , 'numpy'):
lowerCAmelCase__ = input_boxes.numpy().tolist()
if (
not isinstance(lowercase__ , lowercase__)
or not isinstance(input_boxes[0] , lowercase__)
or not isinstance(input_boxes[0][0] , lowercase__)
):
raise ValueError('Input boxes must be a list of list of list of floating points.')
lowerCAmelCase__ = [np.array(lowercase__).astype(np.floataa) for box in input_boxes]
else:
lowerCAmelCase__ = None
return input_points, input_labels, input_boxes
@property
def __snake_case ( self : List[Any]):
'''simple docstring'''
lowerCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase__))
def __snake_case ( self : int , *lowercase__ : int , **lowercase__ : int):
'''simple docstring'''
return self.image_processor.post_process_masks(*lowercase__ , **lowercase__)
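# Hedged usage sketch (added; mirrors the public SAM example in the transformers docs,
# checkpoint name and input image are assumptions):
#
#   from transformers import SamModel, SamProcessor
#
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   model = SamModel.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors="pt")
#   outputs = model(**inputs)
#   masks = processor.post_process_masks(
#       outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"])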
| 119 | 1 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area
def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 60 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Union[str, Any] = """bart"""
__lowerCAmelCase : Optional[int] = ["""past_key_values"""]
__lowerCAmelCase : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[Any] , _lowerCamelCase : List[Any]=5_02_65 , _lowerCamelCase : Optional[Any]=10_24 , _lowerCamelCase : Dict=12 , _lowerCamelCase : Dict=40_96 , _lowerCamelCase : Tuple=16 , _lowerCamelCase : Optional[Any]=12 , _lowerCamelCase : Tuple=40_96 , _lowerCamelCase : List[str]=16 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : int="gelu" , _lowerCamelCase : Any=10_24 , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : Optional[int]=0.02 , _lowerCamelCase : int=0.0 , _lowerCamelCase : Any=False , _lowerCamelCase : List[Any]=True , _lowerCamelCase : int=3 , _lowerCamelCase : Tuple=1 , _lowerCamelCase : int=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Any=True , _lowerCamelCase : str=2 , _lowerCamelCase : str=2 , **_lowerCamelCase : str , ):
"""simple docstring"""
A_ : Dict = vocab_size
A_ : Union[str, Any] = max_position_embeddings
A_ : Union[str, Any] = d_model
A_ : Optional[int] = encoder_ffn_dim
A_ : Optional[Any] = encoder_layers
A_ : Union[str, Any] = encoder_attention_heads
A_ : List[str] = decoder_ffn_dim
A_ : List[str] = decoder_layers
A_ : Any = decoder_attention_heads
A_ : List[Any] = dropout
A_ : Optional[int] = attention_dropout
A_ : List[Any] = activation_dropout
A_ : Tuple = activation_function
A_ : Any = init_std
A_ : Union[str, Any] = encoder_layerdrop
A_ : Optional[Any] = decoder_layerdrop
A_ : Tuple = classifier_dropout
A_ : Tuple = use_cache
A_ : List[str] = encoder_layers
A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , _lowerCamelCase ):
A_ : int = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
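# Hedged usage sketch (added for illustration; it targets the upstream
# `transformers.BartConfig` API that this class mirrors, since the obfuscated
# names above are not directly usable). The attribute map declared on the
# class aliases `hidden_size` to `d_model`.
from transformers import BartConfig

bart_config = BartConfig(encoder_layers=6, decoder_layers=6)
assert bart_config.hidden_size == bart_config.d_model == 1024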
class lowercase ( __UpperCAmelCase):
@property
def a_ ( self : Optional[Any] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
A_ : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
A_ : Tuple = {0: '''batch'''}
A_ : Dict = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
A_ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
A_ : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A_ : List[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
A_ , A_ : Any = self.num_layers
for i in range(_lowerCamelCase ):
A_ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
A_ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
A_ : List[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def a_ ( self : str ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
A_ : Optional[Any] = super().outputs
else:
A_ : List[Any] = super(_lowerCamelCase , self ).outputs
if self.use_past:
A_ , A_ : int = self.num_layers
for i in range(_lowerCamelCase ):
A_ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
A_ : Dict = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def a_ ( self : Union[str, Any] , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[TensorType] = None , ):
"""simple docstring"""
A_ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Generate decoder inputs
A_ : Tuple = seq_length if not self.use_past else 1
A_ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Dict = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
A_ : Dict = dict(**_lowerCamelCase , **_lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A_ , A_ : Union[str, Any] = common_inputs['''input_ids'''].shape
A_ : Any = common_inputs['''decoder_input_ids'''].shape[1]
A_ , A_ : Optional[Any] = self.num_attention_heads
A_ : Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A_ : Optional[Any] = decoder_seq_length + 3
A_ : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A_ : Optional[int] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 )
A_ : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A_ , A_ : Optional[Any] = self.num_layers
A_ : Optional[Any] = min(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers
A_ : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
) )
# TODO: test this.
A_ : List[str] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowerCamelCase , _lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) )
return common_inputs
def a_ ( self : Optional[int] , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[TensorType] = None , ):
"""simple docstring"""
A_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A_ , A_ : Optional[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
A_ : Union[str, Any] = seqlen + 2
A_ , A_ : Tuple = self.num_layers
A_ , A_ : Optional[int] = self.num_attention_heads
A_ : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A_ : str = common_inputs['''attention_mask'''].dtype
A_ : Optional[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 )
A_ : int = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase )
]
return common_inputs
def a_ ( self : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[TensorType] = None , ):
"""simple docstring"""
A_ : List[Any] = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A_ : Dict = tokenizer.num_special_tokens_to_add(_lowerCamelCase )
A_ : int = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
A_ : List[str] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
A_ : List[str] = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return common_inputs
def a_ ( self : Tuple , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[TensorType] = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
A_ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
elif self.task == "causal-lm":
A_ : Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
else:
A_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
return common_inputs
def a_ ( self : str , _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
A_ : Tuple = super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
A_ : List[Any] = super(_lowerCamelCase , self )._flatten_past_key_values_(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
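# Hedged usage note (added; the command below belongs to the upstream
# `transformers.onnx` exporter CLI, not to this file). The ONNX config defined
# above backs seq2seq exports such as:
#
#   python -m transformers.onnx --model=facebook/bart-large --feature=seq2seq-lm onnx/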
| 167 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=A_ )
class UpperCAmelCase_ ( A_ ):
lowercase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True} )
lowercase__ = Features({'''image''': Image()} )
lowercase__ = Features({'''labels''': ClassLabel} )
lowercase__ = "image"
lowercase__ = "labels"
def __magic_name__ ( self : List[str] , snake_case_ : str ) -> Tuple:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , snake_case_ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
A__ = copy.deepcopy(self )
A__ = self.label_schema.copy()
A__ = features[self.label_column]
A__ = label_schema
return task_template
@property
def __magic_name__ ( self : Any ) -> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
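# Hedged usage sketch (added; assumes the upstream `datasets` task API that this
# template mirrors). Aligning the template with a concrete schema copies the
# dataset's `ClassLabel` feature into the template's label schema.
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels")
aligned_task = task.align_with_features(features)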
| 230 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
A__ = model.config
A__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
A__ = MBartConfig(
is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase_ , add_final_layer_norm=lowercase_ , )
return encoder_config, decoder_config
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
if "encoder.model" in name:
A__ = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
A__ = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
A__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
A__ = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
A__ = "encoder." + name
if "attn.proj" in name:
A__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
A__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
A__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
A__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
A__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A__ = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
A__ = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
A__ = "encoder.layernorm.bias"
return name
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
A__ = key.split("." )
A__ = int(key_split[3] )
A__ = int(key_split[5] )
A__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
A__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=False ) -> Dict:
# load original model
A__ = DonutModel.from_pretrained(lowercase_ ).eval()
# load HuggingFace model
A__, A__ = get_configs(lowercase_ )
A__ = DonutSwinModel(lowercase_ )
A__ = MBartForCausalLM(lowercase_ )
A__ = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
A__ = original_model.state_dict()
A__ = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# verify results on scanned document
A__ = load_dataset("hf-internal-testing/example-documents" )
A__ = dataset["test"][0]["image"].convert("RGB" )
A__ = XLMRobertaTokenizerFast.from_pretrained(lowercase_ , from_slow=lowercase_ )
A__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
A__ = DonutProcessor(lowercase_ , lowercase_ )
A__ = processor(lowercase_ , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
A__ = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
A__ = "When is the coffee break?"
A__ = task_prompt.replace("{user_input}" , lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
A__ = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
A__ = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
A__ = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
A__ = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
A__ = "hello world"
else:
raise ValueError("Model name not supported" )
A__ = original_model.decoder.tokenizer(lowercase_ , add_special_tokens=lowercase_ , return_tensors="pt" )[
"input_ids"
]
A__ = original_model.encoder.model.patch_embed(lowercase_ )
A__, A__ = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
# verify encoder hidden states
A__ = original_model.encoder(lowercase_ )
A__ = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-2 )
# verify decoder hidden states
A__ = original_model(lowercase_ , lowercase_ , lowercase_ ).logits
A__ = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
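    # Example invocation (added; the script filename and output path are hypothetical):
    #
    #   python convert_donut_to_pytorch.py \
    #       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
    #       --pytorch_dump_folder_path ./donut-docvqa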
| 230 | 1 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : str = "▁"
_lowerCamelCase : Any = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
_lowerCamelCase : str = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
_lowerCamelCase : Optional[int] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
_lowerCamelCase : int = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
_lowerCamelCase : Dict = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["input_ids"]
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = RESOURCE_FILES_NAMES
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Any=False , UpperCamelCase__ : int="utf8" , UpperCamelCase__ : Tuple="[UNK]" , UpperCamelCase__ : List[Any]="[SEP]" , UpperCamelCase__ : str="[PAD]" , UpperCamelCase__ : Union[str, Any]="[CLS]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : int , ):
"""simple docstring"""
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , vocab_file=UpperCamelCase__ , encoding=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
UpperCamelCase = do_lower_case
UpperCamelCase = sentencepiece_model_ckpt
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCamelCase = self.load_vocab(filepath=UpperCamelCase__ )
else:
UpperCamelCase = {self.sp_model.id_to_piece(UpperCamelCase__ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCamelCase = {v: k for k, v in self.vocab.items()}
def A ( self : Optional[int] , UpperCamelCase__ : Tuple ):
"""simple docstring"""
if text is None:
return None
UpperCamelCase = self.tokenize(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = '', []
for i, ch in enumerate(UpperCamelCase__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCamelCase = self.SP_CHAR_MAPPING.get(UpperCamelCase__ )
else:
UpperCamelCase = unicodedata.normalize('NFKC' , UpperCamelCase__ )
if self.is_whitespace(UpperCamelCase__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCamelCase__ ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase = normalized_text, [], 0
if self.do_lower_case:
UpperCamelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCamelCase = token[1:]
UpperCamelCase = text[offset:].index(UpperCamelCase__ ) + offset
UpperCamelCase = start + len(UpperCamelCase__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCamelCase = end
return token_mapping
@property
def A ( self : List[str] ):
"""simple docstring"""
return len(self.vocab )
def A ( self : List[str] ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self : List[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def A ( self : Optional[int] , UpperCamelCase__ : Any ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase__ , UpperCamelCase__ ) for c in text) )
def A ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=6_4 , UpperCamelCase__ : Optional[Any]=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
UpperCamelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
UpperCamelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
UpperCamelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
UpperCamelCase = self.sp_model.EncodeAsPieces(UpperCamelCase__ )
else:
UpperCamelCase = self.sp_model.SampleEncodeAsPieces(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = []
for pi, piece in enumerate(UpperCamelCase__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCamelCase__ ) and pi != 0:
new_pieces.append(UpperCamelCase__ )
continue
else:
continue
UpperCamelCase = 0
for i, chunk in enumerate(UpperCamelCase__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCamelCase__ ) or self.is_punct(UpperCamelCase__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCamelCase__ )
UpperCamelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase = i
if len(UpperCamelCase__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def A ( self : Tuple , UpperCamelCase__ : Any ):
"""simple docstring"""
UpperCamelCase = ''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , ' ' ).strip()
return out_string
def A ( self : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.convert_ids_to_tokens(UpperCamelCase__ )
UpperCamelCase = ''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , ' ' ).strip()
return out_string
def A ( self : int , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def A ( self : Optional[int] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
return self.reverse_vocab.get(UpperCamelCase__ , self.unk_token )
def A ( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : str=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def A ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def A ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
def A ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(UpperCamelCase__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(UpperCamelCase__ ) + 1) + [1] * (len(UpperCamelCase__ ) + 3)
def A ( self : Union[str, Any] , UpperCamelCase__ : int ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def A ( self : List[str] , UpperCamelCase__ : str ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def A ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def A ( self : str , UpperCamelCase__ : str ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(UpperCamelCase__ ) == 1:
UpperCamelCase = unicodedata.category(UpperCamelCase__ )
if cat == "Zs":
return True
return False
def A ( self : List[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = {}
with io.open(UpperCamelCase__ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(UpperCamelCase__ ):
UpperCamelCase = line.rstrip('\n' )
UpperCamelCase = int(UpperCamelCase__ )
return token_to_idx
def A ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
"""simple docstring"""
UpperCamelCase = 0
if os.path.isdir(UpperCamelCase__ ):
UpperCamelCase = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
UpperCamelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
UpperCamelCase = token_index
writer.write(token + '\n' )
index += 1
UpperCamelCase = os.path.join(UpperCamelCase__ , 'sentencepiece.bpe.model' )
with open(UpperCamelCase__ , 'wb' ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (vocab_file,)
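# Hedged usage sketch (added; relies on the upstream `transformers.ErnieMTokenizer`
# and the Hub checkpoint referenced in the resource maps above):
from transformers import ErnieMTokenizer

ernie_m_tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
print(ernie_m_tokenizer.tokenize("a quick brown fox"))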
| 28 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =0
@slow
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(_lowerCAmelCase) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (GPTaTokenizer, GPTaTokenizerFast))
self.assertGreater(len(_lowerCAmelCase) , 0)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 2_0)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoConfig.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
# Check that tokenizer_type ≠ model_type
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowerCAmelCase , 'vocab.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='bert' , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowerCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowerCAmelCase , 'merges.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='gpt2' , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowerCAmelCase , 'vocab.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='bert')
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowerCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowerCAmelCase , 'merges.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='gpt2')
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
with pytest.raises(_lowerCAmelCase):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx')
@require_tokenizers
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowercase =tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased')
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
if isinstance(_lowerCAmelCase , _lowerCAmelCase):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _lowerCAmelCase)
else:
self.assertEqual(tokenizer.do_lower_case , _lowerCAmelCase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
@require_tokenizers
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_lowerCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__lowercase =tokenizer_class.from_pretrained('julien-c/herlolip-not-exists')
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =TOKENIZER_MAPPING.values()
__lowercase =[]
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_lowerCAmelCase) , _lowerCAmelCase)
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased') , _lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_lowerCAmelCase)
__lowercase ='Hello, world. How are you?'
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
__lowercase =AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_lowerCAmelCase)
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
@require_tokenizers
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config')
self.assertEqual(type(_lowerCAmelCase) , _lowerCAmelCase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0)
self.assertEqual(tokenizer.unk_token , '[UNK]')
self.assertEqual(tokenizer.padding_side , 'right')
self.assertEqual(tokenizer.truncation_side , 'right')
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 1_2)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('ctrl')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =get_tokenizer_config('bert-base-cased')
__lowercase =config.pop('_commit_hash' , _lowerCAmelCase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_lowerCAmelCase , {'do_lower_case': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__lowercase =get_tokenizer_config(_lowerCAmelCase)
self.assertDictEqual(_lowerCAmelCase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =get_tokenizer_config(_lowerCAmelCase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer')
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
try:
AutoConfig.register('custom' , _lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase):
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
__lowercase =CustomTokenizer.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
try:
AutoConfig.register('custom' , _lowerCAmelCase)
# Can register in two steps
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase):
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase =BertTokenizerFast.from_pretrained(_lowerCAmelCase)
bert_tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =CustomTokenizerFast.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase):
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCAmelCase):
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
@require_tokenizers
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = False
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = NewTokenizer
lowerCAmelCase__ = False
try:
AutoConfig.register('custom' , _lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
# If remote code is not set, the default is to use local
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertTrue(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCAmelCase , 'bert-base is not a local folder and is not a valid model identifier'):
__lowercase =AutoTokenizer.from_pretrained('bert-base')
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , revision='aaaaaa')
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 166 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=32 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=16 , lowerCamelCase=[32, 64, 128] , lowerCamelCase=[1, 2, 1] , lowerCamelCase=[2, 2, 4] , lowerCamelCase=2 , lowerCamelCase=2.0 , lowerCamelCase=True , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase="gelu" , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=0.02 , lowerCamelCase=1E-5 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=10 , lowerCamelCase=8 , lowerCamelCase=["stage1", "stage2"] , lowerCamelCase=[1, 2] , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = embed_dim
__a = hidden_sizes
__a = depths
__a = num_heads
__a = window_size
__a = mlp_ratio
__a = qkv_bias
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = drop_path_rate
__a = hidden_act
__a = use_absolute_embeddings
__a = patch_norm
__a = layer_norm_eps
__a = initializer_range
__a = is_training
__a = scope
__a = use_labels
__a = type_sequence_label_size
__a = encoder_stride
__a = out_features
__a = out_indices
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = FocalNetModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
__a = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__a = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = FocalNetBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__a = None
__a = FocalNetBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = FocalNetForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = FocalNetForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.type_sequence_label_size
__a = FocalNetForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a = 1
__a = FocalNetForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_snake_case : Tuple = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
_snake_case : str = False
_snake_case : List[str] = False
_snake_case : Tuple = False
_snake_case : Dict = False
_snake_case : int = False
def a__ ( self ):
__a = FocalNetModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , embed_dim=37 , has_text_modality=lowerCamelCase )
def a__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ):
return
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def a__ ( self ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__a = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__a = outputs.hidden_states
__a = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# FocalNet has a different seq_length
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__a = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
__a , __a , __a , __a = reshaped_hidden_states[0].shape
__a = (
reshaped_hidden_states[0].view(lowerCamelCase , lowerCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__ ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
def a__ ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.patch_size = 3
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
patch_size = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@slow
def a__ ( self ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FocalNetModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def a__ ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def a__ ( self ):
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device )
image_processor = self.default_image_processor
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class snake_case__ ( snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (FocalNetBackbone,) if is_torch_available() else ()
_snake_case : int = FocalNetConfig
_snake_case : int = False
def a__ ( self ):
self.model_tester = FocalNetModelTester(self )
| 366 | """simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""openbmb/cpm-ant-10b""": 1024,
}
def load_vocab( a ):
vocab = collections.OrderedDict()
with open(a , "r" , encoding="utf-8" ) as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens ):
token = token.rstrip("\n" )
vocab[token] = index
return vocab
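# A minimal sketch of the vocab file this loader expects (hypothetical
# contents): one token per line, where the 0-based line number becomes the id.
#
#   <pad>    # id 0
#   <unk>    # id 1
#   the      # id 2
#
# >>> vocab = load_vocab("vocab.txt")   # doctest-style sketch, file not included here
# >>> vocab["the"]
# 2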
class snake_case__ ( snake_case_ ):
def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def a__ ( self , token ):
chars = list(token )
if len(chars ) > self.max_input_chars_per_word:
return [self.unk_token]
start = 0
sub_tokens = []
while start < len(chars ):
end = len(chars )
cur_substr = None
while start < end:
substr = "".join(chars[start:end] )
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(cur_substr )
start = end
return sub_tokens
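# Illustration of the greedy longest-match-first loop above, with a
# hypothetical vocab {"play", "ing"} and input "playing": the inner loop first
# tries "playing" (miss), shrinks the window until "play" hits, then restarts
# at index 4 and matches "ing", yielding ["play", "ing"]. A position with no
# match at all contributes one unk_token and advances start by one character.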
class snake_case__ ( snake_case_ ):
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : int = ["""input_ids""", """attention_mask"""]
_snake_case : int = False
def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
self.bod_token = bod_token
self.eod_token = eod_token
self.encoder = load_vocab(vocab_file )
self.encoder[" "] = self.encoder[space_token]
self.encoder["\n"] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
self.decoder = {v: k for k, v in self.encoder.items()}
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def a__ ( self ):
return self.encoder[self.bod_token]
@property
def a__ ( self ):
return self.encoder[self.eod_token]
@property
def a__ ( self ):
return self.encoder["\n"]
@property
def a__ ( self ):
return len(self.encoder )
def a__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self , text ):
output_tokens = []
for x in jieba.cut(text , cut_all=False ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
return output_tokens
def a__ ( self , token_ids , **kwargs ):
token_ids = [i for i in token_ids if i >= 0]
token_ids = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(token_ids , **kwargs )
def a__ ( self , token ):
return token in self.encoder
def a__ ( self , lowerCamelCase ):
return "".join(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def a__ ( self , lowerCamelCase ):
return self.decoder.get(lowerCamelCase , self.unk_token )
def a__ ( self , save_directory , filename_prefix = None ):
if os.path.isdir(save_directory ):
vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
index = 0
if " " in self.encoder:
self.encoder["</_>"] = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
self.encoder["</n>"] = self.encoder["\n"]
del self.encoder["\n"]
self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(vocab_file , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
index = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def a__ ( self , token_ids_0 , token_ids_1 = None ):
if token_ids_1 is None:
return [self.bos_token_id] + token_ids_0
return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
def a__ ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
return [1] + ([0] * len(token_ids_0 ))
| 268 | 0 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset( dataset , expected_features ):
"""simple docstring"""
assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
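# Note: the assertions above rely on the suite's shared `text_path` pytest
# fixture (defined in a conftest that is not part of this file) pointing at a
# plain-text file with exactly four lines, one row per line in the single
# "text" column.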
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_text_keep_in_memory( keep_in_memory , text_path , tmp_path ):
"""simple docstring"""
cache_dir = tmp_path / 'cache'
expected_features = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = TextDatasetReader(text_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
_check_text_dataset(dataset , expected_features )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def test_dataset_from_text_features( features , text_path , tmp_path ):
"""simple docstring"""
cache_dir = tmp_path / 'cache'
default_expected_features = {'text': 'string'}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
dataset = TextDatasetReader(text_path , features=features , cache_dir=cache_dir ).read()
_check_text_dataset(dataset , expected_features )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_text_split( split , text_path , tmp_path ):
"""simple docstring"""
cache_dir = tmp_path / 'cache'
expected_features = {'text': 'string'}
dataset = TextDatasetReader(text_path , cache_dir=cache_dir , split=split ).read()
_check_text_dataset(dataset , expected_features )
assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('path_type' , [str, list] )
def test_dataset_from_text_path_type( path_type , text_path , tmp_path ):
"""simple docstring"""
if issubclass(path_type , str ):
path = text_path
elif issubclass(path_type , list ):
path = [text_path]
cache_dir = tmp_path / 'cache'
expected_features = {'text': 'string'}
dataset = TextDatasetReader(path , cache_dir=cache_dir ).read()
_check_text_dataset(dataset , expected_features )
def _check_text_datasetdict( dataset_dict , expected_features , splits=("train",) ):
"""simple docstring"""
assert isinstance(dataset_dict , DatasetDict )
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_datasetdict_from_text_keep_in_memory( keep_in_memory , text_path , tmp_path ):
"""simple docstring"""
cache_dir = tmp_path / 'cache'
expected_features = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = TextDatasetReader({'train': text_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
_check_text_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def test_datasetdict_from_text_features( features , text_path , tmp_path ):
"""simple docstring"""
cache_dir = tmp_path / 'cache'
default_expected_features = {'text': 'string'}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
dataset = TextDatasetReader({'train': text_path} , features=features , cache_dir=cache_dir ).read()
_check_text_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_text_split( split , text_path , tmp_path ):
"""simple docstring"""
if split:
path = {split: text_path}
else:
split = 'train'
path = {'train': text_path, 'test': text_path}
cache_dir = tmp_path / 'cache'
expected_features = {'text': 'string'}
dataset = TextDatasetReader(path , cache_dir=cache_dir ).read()
_check_text_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() ) | 331 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = ["""pixel_values"""]
def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
super().__init__(**kwargs )
size = size if size is not None else {'shortest_edge': 384}
size = get_size_dict(size , default_to_square=False )
self.do_resize = do_resize
self.size = size
# Default value set here for backwards compatibility where the value in config is None
self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize( self , image : np.ndarray , size : Dict[str, int] , crop_pct : float , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
size = get_size_dict(size , default_to_square=False )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
shortest_edge = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
resize_shortest_edge = int(shortest_edge / crop_pct )
resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
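# Worked example: with size={"shortest_edge": 224} and the default
# crop_pct = 224 / 256 = 0.875, the image is first resized so its shortest
# edge is int(224 / 0.875) = 256 and then center-cropped to 224 x 224, the
# classic "resize to 256, crop to 224" evaluation recipe. At 384 and above
# the image is warped straight to (shortest_edge, shortest_edge) with no crop.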
def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
do_resize = do_resize if do_resize is not None else self.do_resize
crop_pct = crop_pct if crop_pct is not None else self.crop_pct
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size , default_to_square=False )
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
if do_normalize:
images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data , tensor_type=return_tensors ) | 331 | 1 |
import json
import sys
def format_json_to_md( input_json_file , output_md_file ):
"""simple docstring"""
with open(input_json_file , encoding='utf-8' ) as f:
results = json.load(f )
output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(results ):
benchmark_res = results[benchmark_name]
benchmark_file_name = benchmark_name.split('/' )[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
title = '| metric |'
lines = '|--------|'
value = '| new / old (diff) |'
for metric_name in sorted(benchmark_res ):
metric_vals = benchmark_res[metric_name]
new_val = metric_vals['new']
old_val = metric_vals.get('old' , None )
dif_val = metric_vals.get('diff' , None )
val_str = F""" {new_val:f}""" if isinstance(new_val , (int, float) ) else 'None'
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(old_val , (int, float) ) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(dif_val , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(output_md_file , 'w' , encoding='utf-8' ) as f:
f.writelines('\n'.join(output_md ) )
if __name__ == "__main__":
input_json_file = sys.argv[1]
output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
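# Sketch of the JSON shape this script consumes, inferred from the accesses
# above (file names and metric names are hypothetical):
#
# {
#     "benchmarks/inference_speed.json": {
#         "time_per_batch": {"new": 0.0123, "old": 0.0150, "diff": -0.0027}
#     }
# }
#
# which renders as one "### Benchmark: inference_speed.json" section with a
# "new / old (diff)" table row per metric.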
| 348 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''sew-d'''
def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1e-7 , feature_layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> Any:
'''simple docstring'''
super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim )
self.conv_stride = list(conv_stride )
self.conv_kernel = list(conv_kernel )
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim )
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.squeeze_factor = squeeze_factor
self.max_position_embeddings = max_position_embeddings
self.position_buckets = position_buckets
self.share_att_key = share_att_key
self.relative_attention = relative_attention
self.norm_rel_ebd = norm_rel_ebd
self.pos_att_type = list(pos_att_type )
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layer_norm_eps = layer_norm_eps
self.feature_layer_norm_eps = feature_layer_norm_eps
self.initializer_range = initializer_range
self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# sequence classification
self.use_weighted_layer_sum = use_weighted_layer_sum
self.classifier_proj_size = classifier_proj_size
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
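# Example: with the default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
# the property above returns 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320, i.e. one output
# frame per 320 input samples (20 ms of audio at 16 kHz).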
| 348 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used( config_class , attributes , default_value , source_strings ):
attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
attribute_used = True
# Deal with multi-line cases
elif (
re.search(
rF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , modeling_source , )
is not None
):
attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
attributes_to_allow = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
case_allowed = True
if not attribute_used:
case_allowed = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
case_allowed = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
case_allowed = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
case_allowed = True
elif attribute.endswith("_token_id" ):
case_allowed = True
# configuration class specific cases
if not case_allowed:
allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
case_allowed = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
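# Example of what the multi-line regex branch above matches (hypothetical
# modeling source): an attribute read that was wrapped by a code formatter,
#
#     hidden_size = getattr(
#         self.config, "hidden_size", 768
#     )
#
# still counts as a use of "hidden_size" even though `getattr(` and the
# attribute string sit on different lines.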
def check_config_attributes_being_used( config_class ):
signature = dict(inspect.signature(config_class.__init__ ).parameters )
parameter_names = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
parameter_defaults = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
reversed_attribute_map = {}
if len(config_class.attribute_map ) > 0:
reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
config_source_file = inspect.getsourcefile(config_class )
model_dir = os.path.dirname(config_source_file )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith("modeling_" )]
# Get the source code strings
modeling_sources = []
for path in modeling_paths:
if os.path.isfile(path ):
with open(path ) as fp:
modeling_sources.append(fp.read() )
unused_attributes = []
for config_param, default_value in zip(parameter_names , parameter_defaults ):
# `attributes` here is all the variant names for `config_param`
attributes = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
unused_attributes.append(attributes[0] )
return sorted(unused_attributes )
def check_config_attributes( ):
configs_with_unused_attributes = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
config_classes_in_module = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda x : inspect.isclass(x )
and issubclass(x , PretrainedConfig )
and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
unused_attributes = check_config_attributes_being_used(config_class )
if len(unused_attributes ) > 0:
configs_with_unused_attributes[config_class.__name__] = unused_attributes
if len(configs_with_unused_attributes ) > 0:
error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
| 52 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class a__ ( a_ ):
def __init__( self , label_idx=-1 ):
# in NER datasets, the last column is usually reserved for NER label
self.label_idx = label_idx
def __magic_name__ ( self , data_dir , mode ):
if isinstance(mode , Split ):
mode = mode.value
file_path = os.path.join(data_dir , f"""{mode}.txt""" )
guid_index = 1
examples = []
with open(file_path , encoding="utf-8" ) as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=words , labels=labels ) )
guid_index += 1
words = []
labels = []
else:
splits = line.split(" " )
words.append(splits[0] )
if len(splits ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=words , labels=labels ) )
return examples
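# Sketch of the CoNLL-style input this reader expects (tokens are
# hypothetical): one "word ... label" line per token, sentences separated by
# blank lines or -DOCSTART- markers, with the label read from column
# self.label_idx (the last column by default):
#
#   EU B-ORG
#   rejects O
#   German B-MISC
#
#   Peter B-PER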
def __magic_name__ ( self , writer , test_input_reader , preds_list ):
example_id = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(line )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
output_line = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(output_line )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def __magic_name__ ( self , path ):
if path:
with open(path , "r" ) as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class a__ ( a_ ):
def __init__( self ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __magic_name__ ( self , path ):
if path:
with open(path , "r" ) as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class a__ ( a_ ):
def __magic_name__ ( self , data_dir , mode ):
if isinstance(mode , Split ):
mode = mode.value
file_path = os.path.join(data_dir , f"""{mode}.txt""" )
guid_index = 1
examples = []
with open(file_path , encoding="utf-8" ) as f:
for sentence in parse_incr(f ):
words = []
labels = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(words ) == len(labels )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=words , labels=labels ) )
guid_index += 1
return examples
def __magic_name__ ( self , writer , test_input_reader , preds_list ):
example_id = 0
for sentence in parse_incr(test_input_reader ):
s_p = preds_list[example_id]
out = ""
for token in sentence:
out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(out )
example_id += 1
def __magic_name__ ( self , path ):
if path:
with open(path , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 202 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class snake_case__(a_ , unittest.TestCase ):
"""simple docstring"""
lowercase_ = FlaxAutoencoderKL
@property
def snake_case ( self : int ):
batch_size = 4
num_channels = 3
sizes = (32, 32)
prng_key = jax.random.PRNGKey(0 )
image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def snake_case ( self : Union[str, Any] ):
init_dict = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 358 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34 # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8 # unit of c : m * s^-1
def __lowerCamelCase ( force , area , distance ):
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
area = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
distance = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 121 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split('''.''' ):
hf_pointer = getattr(hf_pointer , attribute )
if weight_type is not None:
hf_shape = getattr(hf_pointer , weight_type ).shape
else:
hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
else:
hf_pointer.data = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
proj_weight = None
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
is_used = True
elif name.split('''.''' )[0] == "proj":
proj_weight = fairseq_model.proj
is_used = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
is_used = True
if "*" in mapped_key:
layer_num = name.split(key )[0].split('''.''' )[-2]
mapped_key = mapped_key.replace('''*''' , layer_num )
if "weight_g" in name:
weight_type = '''weight_g'''
elif "weight_v" in name:
weight_type = '''weight_v'''
elif "bias" in name:
weight_type = '''bias'''
elif "weight" in name:
weight_type = '''weight'''
else:
weight_type = None
set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(name )
logger.warning(f'Unused weights: {unused_weights}' )
return proj_weight
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
name = full_name.split('''conv_layers.''' )[-1]
items = name.split('''.''' )
layer_id = int(items[0] )
type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(full_name )
def make_linear_from_emb( emb ):
vocab_size , emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
lin_layer.weight.data = emb.weight.data
return lin_layer
def create_vocab_dict( dict_path ):
with open(dict_path , '''r''' , encoding='''utf-8''' ) as f:
lines = f.readlines()
words = [line.split(''' ''' )[0] for line in lines]
num_words = len(words )
vocab_dict = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
return vocab_dict
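# Sketch of the fairseq dict.txt layout this parser assumes (entries are
# hypothetical): one "token count" pair per line, with the token taken from
# the first space-separated field; ids 0-3 are reserved for <s>/<pad>/</s>/<unk>
# and real tokens start at id 4.
#
#   e 51860
#   t 38431
#   a 33152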
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
decoder_config = SpeechaTextaConfig.from_pretrained(
decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
feature_extractor = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
model = model[0].eval()
# set weights for wav2vec2 encoder
hf_encoder = WavaVecaModel(encoder_config )
projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
hf_decoder = SpeechaTextaForCausalLM(decoder_config )
missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
hf_wavavec.config.tie_word_embeddings = False
# add projection layer
hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
vocab_dict = create_vocab_dict(dict_path )
with open(os.path.join(pytorch_dump_folder_path , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(vocab_dict , fp )
tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , '''vocab.json''' ) )
tokenizer.save_pretrained(pytorch_dump_folder_path )
config = hf_wavavec.config.to_dict()
config['''pad_token_id'''] = tokenizer.pad_token_id
config['''bos_token_id'''] = tokenizer.bos_token_id
config['''eos_token_id'''] = tokenizer.eos_token_id
config['''tokenizer_class'''] = '''speech_to_text_2'''
config['''feature_extractor_type'''] = '''wav2vec2'''
hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
hf_wavavec.save_pretrained(pytorch_dump_folder_path )
feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0_2_2_4, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 106 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')
class SCREAMING_SNAKE_CASE ( Generic[T] ):
"""simple docstring"""
dq_store: deque[T]  # Cache store of keys
key_reference: set[T]  # References of the keys in cache
_MAX_CAPACITY: int = 10  # Maximum capacity of cache
def __init__( self : Dict , n : int ):
self.dq_store = deque()
self.key_reference = set()
if not n:
LRUCache._MAX_CAPACITY = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
LRUCache._MAX_CAPACITY = n
def __lowerCAmelCase ( self : str , x : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
last_element = self.dq_store.pop()
self.key_reference.remove(last_element )
else:
self.dq_store.remove(x )
self.dq_store.appendleft(x )
self.key_reference.add(x )
def __lowerCAmelCase ( self : int ):
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Tuple ):
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 106 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class A_ ( unittest.TestCase ):
_UpperCAmelCase : Optional[int] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCAmelCase ( self : Dict , model , tokenizer , processor ):
example_video_filepath = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset')
video_classifier = VideoClassificationPipeline(model=model , image_processor=processor , top_k=2)
examples = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def lowerCAmelCase ( self : List[str] , video_classifier , examples ):
for example in examples:
outputs = video_classifier(example)
self.assertEqual(
outputs , [
{'score': ANY(float), 'label': ANY(str)},
{'score': ANY(float), 'label': ANY(str)},
] ,)
@require_torch
def lowerCAmelCase ( self : Optional[int]):
small_model = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
small_feature_extractor = VideoMAEFeatureExtractor(
size={'shortest_edge': 1_0} , crop_size={'height': 1_0, 'width': 1_0})
video_classifier = pipeline(
'video-classification' , model=small_model , feature_extractor=small_feature_extractor , frame_sampling_rate=4)
video_file_path = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset')
outputs = video_classifier(video_file_path , top_k=2)
self.assertEqual(
nested_simplify(outputs , decimals=4) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] ,)
outputs = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 ,)
self.assertEqual(
nested_simplify(outputs , decimals=4) , [
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
] ,)
@require_tf
def lowerCAmelCase ( self : Union[str, Any]):
pass
| 356 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mask2former""": [
"""MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Mask2FormerConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""Mask2FormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Mask2FormerForUniversalSegmentation""",
"""Mask2FormerModel""",
"""Mask2FormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
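# Note on the pattern above (a sketch, not part of the file): _LazyModule defers the actual
# submodule imports until first attribute access, so e.g.
#   from transformers.models.mask2former import Mask2FormerConfig
# only loads configuration_mask2former at that point, keeping `import transformers` cheap
# when the torch or vision extras are not installed.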
| 113 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
    """simple docstring"""
    if "model" in orig_key:
        orig_key = orig_key.replace("model." , "" )
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1" , "attention.output.LayerNorm" )
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2" , "output.LayerNorm" )
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm" , "LayerNorm" )
    if "transformer" in orig_key:
        layer_num = orig_key.split("." )[0].split("_" )[-1]
        orig_key = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn" , "attention.self" )
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha" , "attention" )
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q" , "self.query" )
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k" , "self.key" )
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v" , "self.value" )
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1" , "intermediate.dense" )
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2" , "output.dense" )
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff" , "output.dense" )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm" , "cls.predictions.transform" )
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    # the two target keys below follow the upstream YOSO conversion script
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict


def convert_yoso_checkpoint( checkpoint_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    orig_state_dict = torch.load(checkpoint_path , map_location="cpu" )["model_state_dict"]
    config = YosoConfig.from_json_file(config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
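    # A hypothetical invocation (script name and paths are placeholders, not from the original):
    #   python convert_yoso_checkpoint.py \
    #       --pytorch_model_path ./yoso.ckpt \
    #       --config_file ./yoso_config.json \
    #       --pytorch_dump_path ./yoso-hf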
| 10 |
import numpy as np
from PIL import Image
def maxpooling( arr , size , stride ) -> np.ndarray:
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr


def avgpooling( arr , size , stride ) -> np.ndarray:
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
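    # A small worked example (a sketch assuming the functions behave as implemented above):
    #   arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    #   maxpooling(arr, size=2, stride=2)  -> [[ 6.,  8.], [14., 16.]]
    #   avgpooling(arr, size=2, stride=2)  -> [[ 3.,  5.], [11., 13.]]  (averages truncated by int())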
| 156 | 0 |
'''simple docstring'''
def count_inversions_bf( arr ):
    num_inversions = 0
    n = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive( arr ):
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p )
    b, inversions_q = count_inversions_recursive(q )
    c, cross_inversions = _count_cross_inversions(a , b )
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions( p , q ):
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ' , num_inversions_bf )
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ' , num_inversions_bf )
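# A quick sanity check (a sketch; values follow from the definitions above):
#   count_inversions_bf([10, 2, 1, 5, 5, 2, 11])            -> 8
#   count_inversions_recursive([10, 2, 1, 5, 5, 2, 11])[1]  -> 8
# The recursive version also returns the merge-sorted list as its first element.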
if __name__ == "__main__":
    main()
| 190
'''simple docstring'''
edges = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
vertices = ['''a''', '''b''', '''c''', '''d''', '''e''']


def topological_sort( start , visited , sort ):
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
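# Trace for the graph above (a sketch of the call order): starting at "a" the function
# recurses into c, then b, then b's children d and e; each vertex is appended only after
# its neighbors, so topological_sort("a", [], []) yields ['c', 'd', 'e', 'b', 'a']
# (children before parents).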
if __name__ == "__main__":
    sort = topological_sort('''a''', [], [])
    print(sort)
| 190 | 1 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
}
class DiffieHellman:
    def __init__( self , group = 14 ) -> None:
        '''simple docstring'''
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )

    def get_private_key( self ) -> str:
        '''simple docstring'''
        return hex(self.__private_key )[2:]

    def generate_public_key( self ) -> str:
        '''simple docstring'''
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]

    def is_valid_public_key( self , key ) -> bool:
        '''simple docstring'''
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )

    def find_shared_key( self , other_key_str ) -> str:
        '''simple docstring'''
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static( remote_public_key , prime ) -> bool:
        '''simple docstring'''
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key , (prime - 1) // 2 , prime ) == 1
        )

    @staticmethod
    def find_shared_key_static( local_private_key_str , remote_public_key_str , group = 14 ) -> str:
        '''simple docstring'''
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
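# A hedged usage sketch (not part of the original file):
#   alice = DiffieHellman(group=14)
#   bob = DiffieHellman(group=14)
#   shared_a = alice.find_shared_key(bob.generate_public_key())
#   shared_b = bob.find_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b  # both sides derive the same SHA-256 digest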
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 6 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool( PipelineTool ):
    default_checkpoint = '''facebook/nllb-200-distilled-600M'''
    description = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    name = '''translator'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['''text''', '''text''', '''text''']
    outputs = ['''text''']

    def encode( self , text , src_lang , tgt_lang ):
        if src_lang not in self.lang_to_code:
            raise ValueError(F"""{src_lang} is not a supported language.""" )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"""{tgt_lang} is not a supported language.""" )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='pt' , src_lang=src_lang , tgt_lang=tgt_lang )

    def forward( self , inputs ):
        return self.model.generate(**inputs )

    def decode( self , outputs ):
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
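# A hedged usage sketch (assumes the PipelineTool __call__ interface; not in the original):
#   tool = TranslationTool()
#   print(tool("Bonjour, le monde !", src_lang="French", tgt_lang="English"))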
| 32 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaV2Config(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaV2Model(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaV2ForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaV2ForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFDebertaV2Model,
            """fill-mask""": TFDebertaV2ForMaskedLM,
            """question-answering""": TFDebertaV2ForQuestionAnswering,
            """text-classification""": TFDebertaV2ForSequenceClassification,
            """token-classification""": TFDebertaV2ForTokenClassification,
            """zero-shot""": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDebertaV2ModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaV2Config , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        self.assertIsNotNone(model )
@require_tf
class TFDebertaV2ModelIntegrationTest( unittest.TestCase ):
    @unittest.skip(reason="Model not available yet" )
    def test_inference_masked_lm( self ):
        pass

    @slow
    def test_inference_no_head( self ):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 )
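    # Template for adding a similar slice check (a sketch; reference values must be
    # captured from a trusted run of the full model, as done for the 3x3 slice above):
    #   expected = tf.constant([[[...]]])  # placeholder reference values
    #   tf.debugging.assert_near(output[:, 1:4, 1:4], expected, atol=1e-4)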
| 198 |
def remove_duplicates( key: str ) -> str:
    """simple docstring"""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map( key: str ) -> dict[str, str]:
    """simple docstring"""
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher( message: str , cipher_map: dict[str, str] ) -> str:
    """simple docstring"""
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )


def decipher( message: str , cipher_map: dict[str, str] ) -> str:
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )


def main() -> None:
    """simple docstring"""
    message = input("Enter message to encode or decode: " ).strip()
    key = input("Enter keyword: " ).strip()
    option = input("Encipher or decipher? E/D:" ).strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option" )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
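    # A worked example (values follow from the mapping logic above):
    #   cipher_map = create_cipher_map("Goodbye!!")   # key reduces to "GODBYE"
    #   encipher("Hello World!!", cipher_map)   -> 'CYJJM VMQJB!!'
    #   decipher("CYJJM VMQJB!!", cipher_map)   -> 'HELLO WORLD!!'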
| 198 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig( PretrainedConfig ):
    model_type = 'efficientnet'

    def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **kwargs ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4


class EfficientNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-5
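# A hedged usage sketch (not part of the file): the defaults above correspond to the B7
# scaling; smaller coefficients approximate smaller EfficientNets, e.g.
#   config = EfficientNetConfig(width_coefficient=1.0, depth_coefficient=1.0, image_size=224)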
| 213 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg: str, hint=None):
    require_version(deps[pkg], hint)
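# A hedged usage sketch: dep_version_check("tokenizers") raises from require_version if the
# installed package falls outside the pin recorded in dependency_versions_table.py.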
| 213 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""" )
class VivitImageProcessor( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self : str , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 2_55 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**a__ )
_A = size if size is not None else {"shortest_edge": 2_56}
_A = get_size_dict(a__ , default_to_square=a__ )
_A = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
_A = get_size_dict(a__ , param_name="crop_size" )
_A = do_resize
_A = size
_A = do_center_crop
_A = crop_size
_A = resample
_A = do_rescale
_A = rescale_factor
_A = offset
_A = do_normalize
_A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a_ ( self : str , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : str , ) -> np.ndarray:
'''simple docstring'''
_A = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" in size:
_A = get_resize_output_image_size(a__ , size["shortest_edge"] , default_to_square=a__ )
elif "height" in size and "width" in size:
_A = (size["height"], size["width"])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def a_ ( self : Optional[Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Tuple , ) -> np.ndarray:
'''simple docstring'''
_A = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(a__ , size=(size["height"], size["width"]) , data_format=a__ , **a__ )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , offset : bool = True , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
def a_ ( self : Tuple , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : List[str] , ) -> np.ndarray:
'''simple docstring'''
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def a_ ( self : Union[str, Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
_A = to_numpy_array(a__ )
if do_resize:
_A = self.resize(image=a__ , size=a__ , resample=a__ )
if do_center_crop:
_A = self.center_crop(a__ , size=a__ )
if do_rescale:
_A = self.rescale(image=a__ , scale=a__ , offset=a__ )
if do_normalize:
_A = self.normalize(image=a__ , mean=a__ , std=a__ )
_A = to_channel_dimension_format(a__ , a__ )
return image
def a_ ( self : str , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Tuple , ) -> PIL.Image.Image:
'''simple docstring'''
_A = do_resize if do_resize is not None else self.do_resize
_A = resample if resample is not None else self.resample
_A = do_center_crop if do_center_crop is not None else self.do_center_crop
_A = do_rescale if do_rescale is not None else self.do_rescale
_A = rescale_factor if rescale_factor is not None else self.rescale_factor
_A = offset if offset is not None else self.offset
_A = do_normalize if do_normalize is not None else self.do_normalize
_A = image_mean if image_mean is not None else self.image_mean
_A = image_std if image_std is not None else self.image_std
_A = size if size is not None else self.size
_A = get_size_dict(a__ , default_to_square=a__ )
_A = crop_size if crop_size is not None else self.crop_size
_A = get_size_dict(a__ , param_name="crop_size" )
if not valid_images(a__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_A = make_batched(a__ )
_A = [
[
self._preprocess_image(
image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , )
for img in video
]
for video in videos
]
_A = {"pixel_values": videos}
        return BatchFeature(data=a__ , tensor_type=a__ )
| 364
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys( config ):
    rename_keys = []
    # fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["state_dict"]
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("bn" , "batch_norm" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
    print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""" )
        model.push_to_hub(f"""openmmlab/{model_name}""" )
        processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 163 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key( state_dict, old, new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict ):
    prefix = ''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[F"""decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[F"""decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[F"""decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[F"""decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[F"""decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
        in_proj_bias_cross_attn = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F"""decoder.layers.{i}.encoder_attn.q_proj.weight"""] = in_proj_weight_cross_attn[:256, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.q_proj.bias"""] = in_proj_bias_cross_attn[:256]
        state_dict[F"""decoder.layers.{i}.encoder_attn.k_proj.weight"""] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.k_proj.bias"""] = in_proj_bias_cross_attn[256:512]
        state_dict[F"""decoder.layers.{i}.encoder_attn.v_proj.weight"""] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.v_proj.bias"""] = in_proj_bias_cross_attn[-256:]
def resize( image, checkpoint_url ):
    width , height = image.size
    current_max_size = max(width, height )
    target_max_size = 800 if 'detection' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize( image ):
    image = F.to_tensor(image )
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint( checkpoint_url, pytorch_dump_folder_path, push_to_hub ):
    logger.info('Converting model...' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
# create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
if "detection" in checkpoint_url:
__lowerCAmelCase = 15
__lowerCAmelCase = 2
__lowerCAmelCase = {0: 'table', 1: 'table rotated'}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
else:
__lowerCAmelCase = 125
__lowerCAmelCase = 6
__lowerCAmelCase = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection', max_size=800 if 'detection' in checkpoint_url else 1000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf', repo_type='dataset', filename=filename )
    image = Image.open(file_path ).convert('RGB' )
    pixel_values = normalize(resize(image, checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
if "detection" in checkpoint_url:
__lowerCAmelCase = (1, 15, 3)
__lowerCAmelCase = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
__lowerCAmelCase = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
__lowerCAmelCase = (1, 125, 7)
__lowerCAmelCase = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
__lowerCAmelCase = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3], UpperCamelCase_, atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3], UpperCamelCase_, atol=1E-4 )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...' )
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
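
# --- Added example (hedged): the `resize` + `normalize` preprocessing above, run in
# isolation on a synthetic image. The 800px long side and the ImageNet mean/std come
# from the helpers defined earlier; everything else is illustrative.
from PIL import Image
from torchvision.transforms import functional as F

dummy = Image.new("RGB", (1200, 900), color="white")
scale = 800 / max(dummy.size)  # detection checkpoints resize the long side to 800
resized = dummy.resize((round(scale * dummy.size[0]), round(scale * dummy.size[1])))
tensor = F.normalize(F.to_tensor(resized), mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
print(tensor.shape)  # torch.Size([3, 600, 800])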
| 284 |
"""simple docstring"""
def _lowerCAmelCase ( num ):
    if isinstance(num , float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if isinstance(num , str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
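# --- Added example (hedged): expected behaviour of the decimal-to-binary helper above.
assert _lowerCAmelCase(0) == "0b0"
assert _lowerCAmelCase(40) == "0b101000"
assert _lowerCAmelCase(-40) == "-0b101000"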
| 100 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
lowercase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap( checkpoint_path , enable_fusion=False) -> Dict:
    model , model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict( state_dict) -> Dict:
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key)
        if re.match(sequential_layers_pattern , key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key).group(1)
            key = key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(sequential_layer)//3}.linear.''')
        elif re.match(text_projection_pattern , key):
            projecton_layer = int(re.match(text_projection_pattern , key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''')
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query")] = query_layer
            model_state_dict[key.replace("qkv" , "key")] = key_layer
            model_state_dict[key.replace("qkv" , "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False) -> Tuple:
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
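# --- Added example (hedged): the qkv split performed inside `rename_state_dict`, shown
# in isolation. A fused (3*dim, dim) projection is chunked into equal q/k/v blocks.
import torch

dim = 8
fused_qkv = torch.randn(3 * dim, dim)
q, k, v = fused_qkv[:dim], fused_qkv[dim : 2 * dim], fused_qkv[2 * dim :]
assert q.shape == k.shape == v.shape == (dim, dim)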
| 180 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( n) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
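# --- Added example (hedged): expected output of the trial-division factorisation above.
assert SCREAMING_SNAKE_CASE(360) == [2, 2, 2, 3, 3, 5]
assert SCREAMING_SNAKE_CASE(97) == [97]  # a prime factors to itself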
| 180 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing( tmp_path , case ):
    """simple docstring"""
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"] | 210 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : str = logging.get_logger(__name__)
__a : int = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCamelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''wavlm'''
def __init__( self , lowerCAmelCase__=32 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__="group" , lowerCAmelCase__="gelu" , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__=False , lowerCAmelCase__=1_28 , lowerCAmelCase__=16 , lowerCAmelCase__=3_20 , lowerCAmelCase__=8_00 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.05 , lowerCAmelCase__=10 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=10 , lowerCAmelCase__=3_20 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_00 , lowerCAmelCase__=2_56 , lowerCAmelCase__=2_56 , lowerCAmelCase__=0.1 , lowerCAmelCase__="mean" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=2_56 , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCAmelCase__=(5, 3, 3, 1, 1) , lowerCAmelCase__=(1, 2, 3, 1, 1) , lowerCAmelCase__=5_12 , lowerCAmelCase__=80 , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = conv_bias
__lowercase = num_buckets
__lowercase = max_bucket_distance
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = num_ctc_classes
__lowercase = vocab_size
__lowercase = do_stable_layer_norm
__lowercase = use_weighted_layer_sum
__lowercase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowercase = num_codevectors_per_group
__lowercase = num_codevector_groups
__lowercase = contrastive_logits_temperature
__lowercase = num_negatives
__lowercase = codevector_dim
__lowercase = proj_codevector_dim
__lowercase = diversity_loss_weight
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# adapter
__lowercase = add_adapter
__lowercase = adapter_kernel_size
__lowercase = adapter_stride
__lowercase = num_adapter_layers
__lowercase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = xvector_output_dim
@property
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 210 | 1 |
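# --- Added example (hedged): what the property above computes for the default strides.
# The product of `conv_stride` is the feature extractor's overall downsampling factor.
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, default_conv_stride, 1) == 320  # input samples per output frame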
def nand_gate (input_a: int , input_b: int ):
    """simple docstring"""
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate ():
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
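# --- Added example (hedged): NAND is functionally complete, so the other basic gates
# can be derived from `nand_gate` alone.
def not_gate(input_a: int) -> int:
    return nand_gate(input_a, input_a)

def and_gate(input_a: int, input_b: int) -> int:
    return not_gate(nand_gate(input_a, input_b))

assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]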
| 82 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs (mockfs ):
    """simple docstring"""
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs ():
    """simple docstring"""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri ():
    """simple docstring"""
    mock_bucket = """mock-s3-bucket"""
    dataset_path = F's3://{mock_bucket}'
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith("""s3://""" ) is False
    new_dataset_path = """./local/path"""
    dataset_path = extract_path_from_uri(new_dataset_path )
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem (mockfs ):
    """simple docstring"""
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True
    fs = fsspec.filesystem("""file""" )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , lowerCAmelCase__ )
def lowerCAmelCase_ (lowerCAmelCase__: Tuple , lowerCAmelCase__: List[str] , lowerCAmelCase__: Dict , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Tuple ):
"""simple docstring"""
UpperCAmelCase_: int = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
UpperCAmelCase_: Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase_: str = F'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase__ )
UpperCAmelCase_: Optional[int] = fsspec.filesystem(compression_fs_class.protocol , fo=lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = os.path.basename(lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as f, open(lowerCAmelCase__ , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def lowerCAmelCase_ (lowerCAmelCase__: int , lowerCAmelCase__: Tuple , lowerCAmelCase__: Any ):
"""simple docstring"""
UpperCAmelCase_: List[str] = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
UpperCAmelCase_: Tuple = compressed_file_paths[protocol]
UpperCAmelCase_: int = """dataset.jsonl"""
UpperCAmelCase_: Any = F'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase_ , *UpperCAmelCase_: Dict = fsspec.get_fs_token_paths(lowerCAmelCase__ )
assert fs.isfile(lowerCAmelCase__ )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def test_hf_filesystem (hf_token , hf_api , hf_private_dataset_repo_txt_data , text_file ):
    """simple docstring"""
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"]
    assert hffs.isdir("""data""" )
    assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" )
    with open(text_file ) as f:
        assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read()
def test_fs_overwrites ():
    """simple docstring"""
    protocol = """bz2"""
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == F'A filesystem protocol was already set for {protocol} and will be overwritten.'
    )
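# --- Added example (hedged): the compression-filesystem pattern exercised above, run
# standalone against a throwaway gzip file.
import gzip
import os
import tempfile

import fsspec

tmp = os.path.join(tempfile.mkdtemp(), "dataset.txt.gz")
with gzip.open(tmp, "wt", encoding="utf-8") as f:
    f.write("hello")
fs = fsspec.filesystem("gzip", fo=tmp)
with fs.open("dataset.txt", "r", encoding="utf-8") as f:
    assert f.read() == "hello"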
| 82 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results ( output_dir :Union[str, Any]) -> Optional[Any]:
    results = {}
    path = os.path.join(output_dir , """all_results.json""")
    if os.path.exists(path):
        with open(path , """r""") as f:
            results = json.load(f)
    else:
        raise ValueError(F'''can\'t find {path}''')
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class a ( _lowercase ):
"""simple docstring"""
    def test_run_glue ( self ) -> int:
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(__UpperCamelCase , """argv""" , __UpperCamelCase ):
_A = time()
xla_spawn.main()
_A = time()
_A = get_results(__UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00 )
    def test_trainer_tpu ( self ) -> Dict:
        import xla_spawn
        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys , """argv""" , testargs ):
            xla_spawn.main()
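# --- Added example (hedged): the `patch.object(sys, "argv", ...)` trick used above,
# reduced to its essence: running another module's `main()` in-process with
# controlled command-line arguments.
import sys
from unittest.mock import patch

def main():
    return sys.argv[1:]

with patch.object(sys, "argv", ["prog", "--max_steps", "10"]):
    assert main() == ["--max_steps", "10"]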
| 180 | """simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer :
    def put ( self , value ):
        raise NotImplementedError()
    def end ( self ):
        raise NotImplementedError()
class TextStreamer ( BaseStreamer):
    def __init__( self , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , **decode_kwargs ):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put ( self , value ):
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''' )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n''' ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''' ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end ( self ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''''''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text ( self , text : str , stream_end : bool = False ):
        print(text , flush=True , end='''''' if not stream_end else None )
    def _is_chinese_char ( self , cp ):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as are Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
class TextIteratorStreamer ( TextStreamer):
    def __init__( self , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , timeout : Optional[float] = None , **decode_kwargs ):
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text ( self , text : str , stream_end : bool = False ):
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : Optional[Any] ) -> List[str]:
return self
    def __next__ ( self ):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
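# --- Added usage sketch (hedged): typical wiring of the streamers above into
# `model.generate`. The gpt2 checkpoint is illustrative; `streamer=` is the hand-off.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["A short story:"], return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True)
thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
thread.start()
generated = "".join(piece for piece in streamer)  # consume text chunks as they arrive
thread.join()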
| 256 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi ( precision ) -> str:
    if not isinstance(precision , int ):
        raise TypeError("Undefined for non-integers" )
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers" )
    getcontext().prec = precision
    number_of_iterations = ceil(precision / 14 )
    constant_term = 42_6880 * Decimal(1_0005 ).sqrt()
    exponential_term = 1
    linear_term = 1359_1409
    partial_sum = Decimal(linear_term )
    for k in range(1 , number_of_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_4514_0134
        exponential_term *= -26_2537_4126_4076_8000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
print(F"""The first {n} digits of pi is: {pi(n)}""") | 354 |
def sylvester ( number ) -> int:
    assert isinstance(number , int ), f'The input value of [n={number}] is not an integer'
    if number == 1:
        return 2
    elif number < 1:
        msg = f'The input value of [n={number}] has to be > 0'
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""") | 196 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: List[str] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: Any = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCamelCase__: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
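# --- Added sketch (hedged): the idea behind `_LazyModule`, reduced to a module-level
# `__getattr__` (PEP 562) as it could appear in a package `__init__.py`. The mapping
# and module names below are illustrative, not the real `_LazyModule` internals.
import importlib

_LAZY_ATTRS = {"BertModel": "modeling_bert"}

def __getattr__(name):
    # Import the heavy submodule only on first attribute access.
    if name in _LAZY_ATTRS:
        module = importlib.import_module("." + _LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")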
| 23 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation :
"""simple docstring"""
    def __init__( self , text = None , conversation_id = None , past_user_inputs=None , generated_responses=None ):
        '''simple docstring'''
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Conversation ):
            return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input ( self , text , overwrite = False ):
        '''simple docstring'''
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    F"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text
    def mark_processed ( self ):
        '''simple docstring'''
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response ( self , response ):
        '''simple docstring'''
        self.generated_responses.append(response )
    def iter_texts ( self ):
        '''simple docstring'''
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input
    def __repr__( self ):
        '''simple docstring'''
        output = F"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = '''user''' if is_user else '''bot'''
            output += F"""{name} >> {text} \n"""
        return output
@add_end_docstrings(
SCREAMING_SNAKE_CASE__ , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class ConversationalPipeline ( Pipeline ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters ( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['''min_length_for_response'''] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['''minimum_tokens'''] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params['''max_length'''] = generate_kwargs['''max_length''']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations , num_workers=0 , **kwargs ):
        '''simple docstring'''
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess ( self , conversation , min_length_for_response=32 ):
        '''simple docstring'''
        if not isinstance(conversation , Conversation ):
            raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
        if conversation.new_user_input is None:
            raise ValueError(
                F"""Conversation with UUID {conversation.uuid} does not contain new user input to process. """
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
        if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward ( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        '''simple docstring'''
        max_length = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        n = model_inputs['''input_ids'''].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            trim = max_length - minimum_tokens
            model_inputs['''input_ids'''] = model_inputs['''input_ids'''][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['''attention_mask'''] = model_inputs['''attention_mask'''][:, -trim:]
        conversation = model_inputs.pop('''conversation''' )
        generate_kwargs['''max_length'''] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess ( self , model_outputs , clean_up_tokenization_spaces=True ):
        '''simple docstring'''
        output_ids = model_outputs['''output_ids''']
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize ( self , conversation ):
        '''simple docstring'''
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
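# --- Added usage sketch (hedged): driving the pipeline above end to end. The DialoGPT
# checkpoint is illustrative; any conversational model works.
from transformers import pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("What's the best way to learn Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])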
| 179 | 0 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert ( model, tf_checkpoint_path, config ) ->Union[str, Any]:
    """simple docstring"""
    tf_path = os.path.abspath(tf_checkpoint_path )
    logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" )
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path )
    names = []
    arrays = []
    layer_depth = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowercase : int = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(f"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
lowercase : List[Any] = name[1:]
# figure out how many levels deep the name is
lowercase : Optional[int] = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(_UpperCamelCase )
# read data
lowercase : Any = tf.train.load_variable(_UpperCamelCase, _UpperCamelCase )
names.append('''/'''.join(_UpperCamelCase ) )
arrays.append(_UpperCamelCase )
logger.info(f"""Read a total of {len(_UpperCamelCase ):,} layers""" )
# Sanity check
if len(set(_UpperCamelCase ) ) != 1:
raise ValueError(f"""Found layer names with different depths (layer depth {list(set(_UpperCamelCase ) )})""" )
lowercase : List[str] = list(set(_UpperCamelCase ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(_UpperCamelCase, _UpperCamelCase ):
lowercase : Optional[int] = full_name.split('''/''' )
lowercase : Tuple = model
lowercase : Any = []
for i, m_name in enumerate(_UpperCamelCase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
lowercase : Tuple = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
lowercase : int = getattr(_UpperCamelCase, '''embeddings''' )
lowercase : Optional[Any] = getattr(_UpperCamelCase, '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
lowercase : int = getattr(_UpperCamelCase, '''encoder''' )
lowercase : Tuple = getattr(_UpperCamelCase, '''layer''' )
lowercase : List[Any] = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
lowercase : str = getattr(_UpperCamelCase, '''pooler''' )
lowercase : str = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
lowercase : Optional[int] = getattr(_UpperCamelCase, '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
lowercase : str = getattr(_UpperCamelCase, '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
lowercase : List[Any] = getattr(_UpperCamelCase, '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
lowercase : int = getattr(_UpperCamelCase, '''token_type_embeddings''' )
else:
raise ValueError(f"""Unknown embedding layer with name {full_name}""" )
trace.append('''weight''' )
lowercase : Union[str, Any] = getattr(_UpperCamelCase, '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
lowercase : Tuple = getattr(_UpperCamelCase, '''attention''' )
lowercase : str = getattr(_UpperCamelCase, '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
lowercase : Dict = getattr(_UpperCamelCase, '''attention''' )
lowercase : Any = getattr(_UpperCamelCase, '''output''' )
lowercase : Union[str, Any] = getattr(_UpperCamelCase, '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
lowercase : Union[str, Any] = getattr(_UpperCamelCase, '''attention''' )
lowercase : str = getattr(_UpperCamelCase, '''output''' )
lowercase : Optional[int] = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
lowercase : List[str] = getattr(_UpperCamelCase, '''output''' )
lowercase : str = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "_output_layer_norm":
            # output layer norm
trace.extend(['''output''', '''LayerNorm'''] )
lowercase : Dict = getattr(_UpperCamelCase, '''output''' )
lowercase : int = getattr(_UpperCamelCase, '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
lowercase : Optional[Any] = getattr(_UpperCamelCase, '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
lowercase : Dict = getattr(_UpperCamelCase, '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
lowercase : Optional[Any] = getattr(_UpperCamelCase, '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
lowercase : List[str] = getattr(_UpperCamelCase, '''intermediate''' )
lowercase : Optional[int] = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
lowercase : Tuple = getattr(_UpperCamelCase, '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
lowercase : str = getattr(_UpperCamelCase, '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
lowercase : Dict = getattr(_UpperCamelCase, '''weight''' )
else:
logger.warning(f"""Ignored {m_name}""" )
# for certain layers reshape is necessary
lowercase : Any = '''.'''.join(_UpperCamelCase )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''', _UpperCamelCase ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''', _UpperCamelCase ):
lowercase : Any = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowercase : List[str] = array.transpose()
if pointer.shape == array.shape:
lowercase : Optional[Any] = torch.from_numpy(_UpperCamelCase )
else:
raise ValueError(
f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
f""" {array.shape}""" )
logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def convert_tfa_checkpoint_to_pytorch ( tf_checkpoint_path, config_path, pytorch_dump_path ) ->Tuple:
    """simple docstring"""
    logger.info(f"""Loading model based on config from {config_path}...""" )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config )
    # Save pytorch-model
    logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
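# --- Added example (hedged): inspecting a TF checkpoint before converting it.
# `tf.train.list_variables` is the same call the loader above starts from; the
# checkpoint path is a placeholder.
import tensorflow as tf

for name, shape in tf.train.list_variables("/path/to/bert_checkpoint"):
    print(f"{name}: {shape}")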
| 173 |
def find_minimum_change ( denominations, value ) ->list[int]:
    """simple docstring"""
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination ) # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        value = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 173 | 1 |
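# --- illustrative example (added; not part of the original snippet) ---
# Minimal sketch exercising the greedy change-maker above, assuming the
# cleaned-up name find_minimum_change. Note: greedy change-making is only
# guaranteed optimal for canonical coin systems such as Indian currency.
example_denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
assert find_minimum_change(example_denominations, 63) == [50, 10, 2, 1]
assert find_minimum_change(example_denominations, "987") == [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]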
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestCodeQuality(TestCase):
    # class and test method names restored so unittest discovery works;
    # the exact original names were lost in this copy.
    '''simple docstring'''

    def _no_encoding_on_file_open(self, filename):
        with open(filename, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filename):
        with open(filename, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 263 |
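# --- illustrative example (added; not part of the original snippet) ---
# Minimal sketch of the encoding-check regex above: it matches open(...) calls
# whose line mentions neither an encoding nor a binary/write mode keyword.
pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert pattern.search(" open('data.txt')") is not None
assert pattern.search(" open('data.txt', encoding='utf-8')") is None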
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "xlm-roberta-xl"

    def __init__(self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32, intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 37 | 0 |
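# --- illustrative example (added; not part of the original snippet) ---
# Minimal sketch using the restored config class above; the keyword overrides
# are assumptions chosen to make a small test config.
config = XLMRobertaXLConfig(num_hidden_layers=2, hidden_size=128)
assert config.model_type == "xlm-roberta-xl" and config.num_hidden_layers == 2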
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 141 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    '''simple docstring'''

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # onnxruntime expects plain numpy arrays as inputs
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download)
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs)
| 141 | 1 |
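# --- illustrative example (added; not part of the original snippet) ---
# Minimal sketch: look up the NumPy dtype for an ONNX Runtime type string
# via the restored ORT_TO_NP_TYPE table above.
assert ORT_TO_NP_TYPE["tensor(float)"] is np.float32
assert ORT_TO_NP_TYPE["tensor(int64)"] is np.int64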
from math import sqrt
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list[int]:
    '''simple docstring'''
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list[int]:
    '''simple docstring'''
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list[int]:
    '''simple docstring'''
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list[int]:
    '''simple docstring'''
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number_1: int, number_2: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest
    # precondition
    assert isinstance(number_1, int) and (
        number_1 >= 0
    ), "'number' must been from type int and positive"
    return number_1
def kg_v(number_1: int, number_2: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(number_1, int)
        and isinstance(number_2, int)
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number_1)
        prime_fac_2 = prime_factorization(number_2)
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1, number_2)
    count_1 = 0
    count_2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count_1 = prime_fac_1.count(n)
                count_2 = prime_fac_2.count(n)
                for _ in range(max(count_1, count_2)):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n)
                for _ in range(count_1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n)
            for _ in range(count_2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    '''simple docstring'''
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list[int]:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    '''simple docstring'''
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
| 159 |
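# --- illustrative example (added; not part of the original snippet) ---
# Quick checks exercising the restored number-theory helpers above. Note that
# goldbach returns the first prime pair found, scanning smaller primes first.
assert is_prime(97) and not is_prime(1)
assert prime_factorization(12) == [2, 2, 3]
assert goldbach(28) == [5, 23]
assert gcd(24, 40) == 8 and kg_v(8, 10) == 40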
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class GPTaTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length:]
        return input_ids
| 159 | 1 |
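# --- illustrative note (added; not part of the original snippet) ---
# Hedged usage sketch for the fast tokenizer above (the GPTa spelling follows
# this copy's garbled "GPT2" identifiers); requires a cached "gpt2" checkpoint:
# tok = GPTaTokenizerFast.from_pretrained("gpt2")
# tok("hello world")["input_ids"]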
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    """simple docstring"""

    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """simple docstring"""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]))
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]))
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]))

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True)
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True)
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]
        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 4 | 1 |
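# --- illustrative example (added; not part of the original snippet) ---
# Hedged usage sketch of HfArgumentParser with the restored BasicExample
# dataclass above: parse CLI-style strings into a typed dataclass instance.
parser = HfArgumentParser(BasicExample)
(example,) = parser.parse_args_into_dataclasses(
    ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"], look_for_args_file=False)
assert example.foo == 1 and example.flag is True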
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 255 |
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 255 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images, **kwargs):
        '''simple docstring'''
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        '''simple docstring'''
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 356 |
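# --- illustrative note (added; not part of the original snippet) ---
# Hedged usage sketch for the pipeline above; requires a CLIP-style checkpoint
# ("openai/clip-vit-base-patch32" is one real example) and a local image file:
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"])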
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        '''simple docstring'''
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        '''simple docstring'''
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        '''simple docstring'''
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        '''simple docstring'''
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        '''simple docstring'''
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        '''simple docstring'''
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        '''simple docstring'''
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        '''simple docstring'''
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        '''simple docstring'''
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        '''simple docstring'''
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        '''simple docstring'''
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        '''simple docstring'''
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        '''simple docstring'''
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        '''simple docstring'''
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map))

    def domain_map(self, *args: torch.Tensor):
        '''simple docstring'''
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        '''simple docstring'''
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        '''simple docstring'''
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        '''simple docstring'''
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        '''simple docstring'''
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        '''simple docstring'''
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        '''simple docstring'''
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 140 | 0 |
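# --- illustrative example (added; not part of the original snippet) ---
# Minimal sketch of the distribution-output machinery above: project raw
# features to Normal parameters and draw a sample (shapes are illustrative).
d_out = NormalOutput(dim=1)
proj = d_out.get_parameter_projection(in_features=32)
distr = d_out.distribution(proj(torch.randn(4, 32)))
assert distr.sample().shape == (4,)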
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    # 1 at the root, 2/3 as children, 4/5 under 2
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"""In-order Traversal: {inorder(root)}""")
    print(f"""Pre-order Traversal: {preorder(root)}""")
    print(f"""Post-order Traversal: {postorder(root)}""", """\n""")
    print(f"""Height of Tree: {height(root)}""", """\n""")
    print("""Complete Level Order Traversal: """)
    print(level_order(root), """\n""")
    print("""Level-wise order Traversal: """)
    for level in range(1, height(root) + 1):
        print(f"""Level {level}:""", get_nodes_from_left_to_right(root, level=level))
    print("""\nZigZag order Traversal: """)
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 242 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'vit_msn'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 145 | 0 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Names of finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """Waiting time of every process in the given queue."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """Turnaround time of every process in the given queue."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """Completion time of every process in the given queue."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """Remaining burst time of every process in the given queue."""
        return [q.burst_time for q in queue]
    def update_waiting_time(self, process: Process) -> int:
        """Update the waiting time of a process that sat in the ready queue."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        """FCFS (non-preemptive): run every queued process to completion."""
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        """Round robin: give each process one time slice, requeue if unfinished."""
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        """Round robin on every queue but the last, then FCFS on the remainder."""
        # all queues except the last one have a round robin time slice
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
__snake_case = Process('''P1''', 0, 53)
__snake_case = Process('''P2''', 0, 17)
__snake_case = Process('''P3''', 0, 68)
__snake_case = Process('''P4''', 0, 24)
__snake_case = 3
__snake_case = [17, 25]
__snake_case = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
__snake_case = Process('''P1''', 0, 53)
__snake_case = Process('''P2''', 0, 17)
__snake_case = Process('''P3''', 0, 68)
__snake_case = Process('''P4''', 0, 24)
__snake_case = 3
__snake_case = [17, 25]
__snake_case = deque([Pa, Pa, Pa, Pa])
__snake_case = MLFQ(number_of_queues, time_slices, queue, 0)
__snake_case = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
f'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
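
# --- Illustrative sketch (added; not part of the original file) ---------------
# A tiny hand-checked run makes the MLFQ mechanics easy to eyeball: two
# processes, one round-robin queue with slice 2, then FCFS. The expected
# numbers below were worked out by hand for this configuration.
def _mlfq_demo() -> None:
    demo = MLFQ(2, [2], deque([Process("A", 0, 3), Process("B", 0, 2)]), 0)
    demo.multi_level_feedback_queue()
    # A runs 0-2 (slice expires), B runs 2-4 and finishes, A resumes 4-5 in FCFS.
    assert demo.calculate_sequence_of_finish_queue() == ["B", "A"]
    assert demo.calculate_completion_time(list(demo.finish_queue)) == [4, 5]
    assert demo.calculate_waiting_time(list(demo.finish_queue)) == [2, 2]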
| 361 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self) -> Dataset:
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self) -> RagRetriever:
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool) -> RagRetriever:
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self) -> RagRetriever:
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self) -> None:
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self) -> None:
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self) -> None:
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self) -> None:
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self) -> None:
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self) -> None:
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self) -> None:
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self) -> None:
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self) -> None:
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval_call(self) -> None:
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
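
# --- Illustrative sketch (added; not part of the original test file) ----------
# The dummy index above reduces to exact FAISS inner-product search; this is
# essentially what retrieve() does under the hood for the two toy embeddings
# (it relies on the same faiss/numpy imports as the tests):
def _faiss_inner_product_demo() -> None:
    d = 8
    docs = np.stack([np.ones(d), 2 * np.ones(d)]).astype(np.float32)
    index = faiss.IndexFlatIP(d)  # string_factory="Flat" == exact inner product
    index.add(docs)
    queries = np.stack([np.ones(d), -np.ones(d)]).astype(np.float32)
    _scores, ids = index.search(queries, 1)
    assert ids.tolist() == [[1], [0]]  # mirrors the doc_ids assertions above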
| 78 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
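
# --- Illustrative sketch (added; not part of the original file) ---------------
# How the three helpers above line up for a sequence pair. The cls/sep ids used
# here are placeholders, not HerBERT's actual vocabulary ids:
def _sketch_pair_layout(a: List[int], b: List[int], cls_id: int = 0, sep_id: int = 2) -> dict:
    """Standalone mirror of the special-token layout for a sequence pair."""
    ids = [cls_id] + a + [sep_id] + b + [sep_id]
    mask = [1] + [0] * len(a) + [1] + [0] * len(b) + [1]  # 1 marks special tokens
    type_ids = (len(a) + 2) * [0] + (len(b) + 1) * [1]  # segment A then segment B
    assert len(ids) == len(mask) == len(type_ids)
    return {"input_ids": ids, "special_tokens_mask": mask, "token_type_ids": type_ids}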
| 327 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


# Note: the class name below was lost in this row; the default values (resize to
# 256, center-crop to 224, BICUBIC, ImageNet-standard mean/std) match the DeiT
# image processor, so that name is restored here.
class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
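
# --- Illustrative sketch (added; not part of the original file) ---------------
# Typical round trip through the processor above on a synthetic image (assumes
# only numpy; shapes follow from the 256-resize + 224-crop + channels-first
# defaults):
def _image_processor_demo() -> None:
    processor = DeiTImageProcessor()
    fake_image = np.random.randint(0, 256, (300, 300, 3), dtype=np.uint8)  # HWC uint8
    batch = processor.preprocess(fake_image, return_tensors=None)
    pixel_values = batch["pixel_values"][0]
    assert pixel_values.shape == (3, 224, 224)  # resized, cropped, channels first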
| 327 | 1 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the weekday name for a date string in mm-dd-yyyy or mm/dd/yyyy form."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
zeller(args.date_input)
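
# --- Worked example (added; not part of the original file) ---------------------
# For 01-31-2010 (a Sunday): after the Jan/Feb shift m=13 and y=2009, so
# c=20, k=9, t=int(2.6*13 - 5.39)=28, u=5, v=2, x=31+9=40, z=75, w=75-40=35,
# and f = 35 % 7 = 0 -> "Sunday". Hence:
# zeller("01-31-2010") == "Your date 01-31-2010, is a Sunday!"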
| 111 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
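
# --- Illustrative sketch (added; not part of the original file) ---------------
# With the default type_vocab_size=0, the inputs property above drops
# token_type_ids from the ONNX spec. from_model_config is assumed as the usual
# OnnxConfig entry point:
def _deberta_onnx_inputs_demo() -> None:
    config = DebertaV2Config()  # type_vocab_size defaults to 0
    onnx_config = DebertaV2OnnxConfig.from_model_config(config)
    assert list(onnx_config.inputs) == ["input_ids", "attention_mask"]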
| 111 | 1 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
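
# --- Illustrative sketch (added; not part of the original script) --------------
# What make_linear_from_emb() accomplishes: the LM head aliases the decoder
# embedding matrix, so logits score each vocab entry by dot product with its
# embedding (a toy check on a made-up embedding table):
def _weight_tying_demo() -> None:
    emb = nn.Embedding(10, 4)  # toy vocab of 10, hidden size 4
    lm_head = make_linear_from_emb(emb)
    hidden = torch.randn(1, 4)
    # nn.Linear computes hidden @ weight.T, and weight now shares emb's storage.
    assert torch.allclose(lm_head(hidden), hidden @ emb.weight.T)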
| 156 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
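
# --- Illustrative sketch (added; not part of the original module) --------------
# The _LazyModule pattern above defers heavy submodule imports until an
# attribute is first touched. A stripped-down, hypothetical version of the idea:
import importlib
import types


class _LazySketch(types.ModuleType):
    """Minimal stand-in for transformers' _LazyModule (illustrative only)."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map "ClassName" -> "submodule" so __getattr__ knows where to look
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the submodule is imported only once
        return value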
| 263 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Write one README.md model card for an allenai wmt16 FSMT checkpoint."""
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 303 | 0 |
from collections.abc import Sequence
def max_subarray_sum(nums: Sequence[int], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: best sum over (optionally empty) contiguous subarrays."""
    if not nums:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in nums:
        # either extend the running subarray or start fresh at the current element
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
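
# --- Illustrative sketch (added; not part of the original file) ----------------
# Quick property check of Kadane's algorithm against an O(n^2) brute force:
def _max_subarray_brute_force(nums) -> float:
    best = float("-inf")
    for i in range(len(nums)):
        total = 0
        for j in range(i, len(nums)):
            total += nums[j]
            best = max(best, total)
    return best


# Both agree on 6 for the sample input above (subarray [4, -1, 2, 1]):
assert _max_subarray_brute_force([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6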
| 24 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
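
# --- Illustrative sketch (added; not part of the original file) ----------------
# Expected URL shape under the current hub layout (exact resolution may vary
# across hfh versions):
# hf_hub_url("user/dataset", "data/train.csv", revision="main") ->
#   "https://huggingface.co/datasets/user/dataset/resolve/main/data/train.csv"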
| 254 | 0 |