code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def _A ( A__ , A__ , A__ , A__=5 ):
"""simple docstring"""
assert masked_input.count('''<mask>''' ) == 1
__lowercase = torch.tensor(tokenizer.encode(A__ , add_special_tokens=A__ ) ).unsqueeze(0 ) # Batch size 1
__lowercase = model(A__ )[0] # The last hidden-state is the first element of the output tuple
__lowercase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__lowercase = logits[0, masked_index, :]
__lowercase = logits.softmax(dim=0 )
__lowercase , __lowercase = prob.topk(k=A__ , dim=0 )
__lowercase = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(A__ ) )] )
__lowercase = tokenizer.mask_token
__lowercase = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
__lowercase = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
if " {0}".format(A__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(A__ ) , A__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(A__ , A__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowerCAmelCase__ = CamembertTokenizer.from_pretrained('''camembert-base''')
lowerCAmelCase__ = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowerCAmelCase__ = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """xlm-roberta-xl"""
def __init__( self :Dict , __snake_case :Optional[Any]=25_08_80 , __snake_case :List[Any]=25_60 , __snake_case :Optional[Any]=36 , __snake_case :Any=32 , __snake_case :int=1_02_40 , __snake_case :List[Any]="gelu" , __snake_case :Union[str, Any]=0.1 , __snake_case :Optional[Any]=0.1 , __snake_case :str=5_14 , __snake_case :Union[str, Any]=1 , __snake_case :Optional[int]=0.02 , __snake_case :str=1E-05 , __snake_case :str=1 , __snake_case :int=0 , __snake_case :Tuple=2 , __snake_case :Optional[int]="absolute" , __snake_case :str=True , __snake_case :Any=None , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__magic_name__ : List[str] =vocab_size
__magic_name__ : List[str] =hidden_size
__magic_name__ : Union[str, Any] =num_hidden_layers
__magic_name__ : Any =num_attention_heads
__magic_name__ : Any =hidden_act
__magic_name__ : List[str] =intermediate_size
__magic_name__ : Any =hidden_dropout_prob
__magic_name__ : Union[str, Any] =attention_probs_dropout_prob
__magic_name__ : Any =max_position_embeddings
__magic_name__ : Any =type_vocab_size
__magic_name__ : List[str] =initializer_range
__magic_name__ : Optional[int] =layer_norm_eps
__magic_name__ : Dict =position_embedding_type
__magic_name__ : Any =use_cache
__magic_name__ : Dict =classifier_dropout
class __A ( UpperCamelCase__ ):
@property
def A__ ( self :Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
__magic_name__ : str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__ : Optional[Any] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 21 | 0 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'xlm-prophetnet'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
SCREAMING_SNAKE_CASE_ = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 30522 , SCREAMING_SNAKE_CASE_ = 1024 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 4096 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 128 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = num_encoder_layers
lowerCamelCase_ = num_encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = num_decoder_layers
lowerCamelCase_ = num_decoder_attention_heads
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = init_std # Normal(0, this parameter)
lowerCamelCase_ = activation_function
# parameters for xlmprophetnet
lowerCamelCase_ = ngram
lowerCamelCase_ = num_buckets
lowerCamelCase_ = relative_max_distance
lowerCamelCase_ = disable_ngram_loss
lowerCamelCase_ = eps
# 3 Types of Dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = dropout
lowerCamelCase_ = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@property
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
| 42 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__magic_name__ : Dict =F"{src_lang}-{tgt_lang}"
print(F"Converting {dataset}-{pair}" )
__magic_name__ : Dict =datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
__magic_name__ : Optional[int] =F"{dataset}-{pair}"
__magic_name__ : int =Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(F"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
__magic_name__ : Dict ="""val""" if split == """validation""" else split
__magic_name__ : List[Any] =save_dir.joinpath(F"{fn}.source" )
__magic_name__ : Optional[int] =save_dir.joinpath(F"{fn}.target" )
__magic_name__ : Optional[Any] =src_path.open("""w+""" )
__magic_name__ : List[Any] =tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ : str =x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 21 | 0 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=UpperCamelCase__ ):
_lowercase : Dict = ['''flax''', '''transformers''']
def __init__( self: Optional[int] , *UpperCamelCase_: Any , **UpperCamelCase_: List[str] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: int , *UpperCamelCase_: Optional[int] , **UpperCamelCase_: Tuple ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=UpperCamelCase__ ):
_lowercase : str = ['''flax''', '''transformers''']
def __init__( self: Dict , *UpperCamelCase_: str , **UpperCamelCase_: Dict ) -> Dict:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: Union[str, Any] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: Dict ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: Tuple , *UpperCamelCase_: List[Any] , **UpperCamelCase_: int ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=UpperCamelCase__ ):
_lowercase : Dict = ['''flax''', '''transformers''']
def __init__( self: List[Any] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] , *UpperCamelCase_: Tuple , **UpperCamelCase_: Any ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: str , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=UpperCamelCase__ ):
_lowercase : Union[str, Any] = ['''flax''', '''transformers''']
def __init__( self: List[str] , *UpperCamelCase_: List[str] , **UpperCamelCase_: Dict ) -> Any:
"""simple docstring"""
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: int , *UpperCamelCase_: Optional[int] , **UpperCamelCase_: Any ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def lowerCamelCase_ ( cls: Any , *UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: Optional[int] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax''', '''transformers'''] )
| 43 |
from __future__ import annotations
from fractions import Fraction
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =[]
__magic_name__ : List[Any] =11
__magic_name__ : Tuple =int("""1""" + """0""" * digit_len )
for num in range(lowerCamelCase , lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowerCamelCase , lowerCamelCase ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
__magic_name__ : List[str] =10
return solutions
def lowerCAmelCase_ ( lowerCamelCase = 2 ):
__magic_name__ : str =1.0
for fraction in fraction_list(lowerCamelCase ):
__magic_name__ : int =Fraction(lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 21 | 0 |
'''simple docstring'''
import functools
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(_lowerCAmelCase ) == 0:
return 0
if min(_lowerCAmelCase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(_lowerCAmelCase ) >= 366:
raise ValueError("All days elements should be less than 366" )
_lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase )
@functools.cache
def dynamic_programming(_lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 44 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
__magic_name__ : Dict =sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__magic_name__ : Union[str, Any] =mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
__magic_name__ : List[Any] =proportion * 4
print(F"The estimated value of pi is {pi_estimate}" )
print(F"The numpy value of pi is {pi}" )
print(F"The total error is {abs(pi - pi_estimate )}" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ):
def identity_function(lowerCamelCase ) -> float:
return x
__magic_name__ : Optional[int] =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__magic_name__ : str =(max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F"Estimating area under y=x where x varies from {min_value} to {max_value}" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {expected_value}" )
print(F"Total error is {abs(estimated_value - expected_value )}" )
print("""******************""" )
def lowerCAmelCase_ ( lowerCamelCase ):
def function_to_integrate(lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
__magic_name__ : Dict =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {pi}" )
print(F"Total error is {abs(estimated_value - pi )}" )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
def A ( lowercase__ : int , lowercase__ : int ) -> int:
return int(input_a == input_a == 0 )
def A ( ) -> None:
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 45 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __A ( tf.keras.layers.Layer ):
def __init__( self :Optional[int] , __snake_case :Dict[str, int] , __snake_case :List[str] , __snake_case :int = None , __snake_case :int = None ):
'''simple docstring'''
super().__init__()
__magic_name__ : Optional[int] =pad_token_id
__magic_name__ : List[Any] =max_length
__magic_name__ : Dict =vocab
__magic_name__ : int =merges
__magic_name__ : Optional[int] =BytePairTokenizer(__snake_case , __snake_case , sequence_length=__snake_case )
@classmethod
def A__ ( cls :List[Any] , __snake_case :GPTaTokenizer , *__snake_case :int , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : List[Any] =[""" """.join(__snake_case ) for m in tokenizer.bpe_ranks.keys()]
__magic_name__ : str =tokenizer.get_vocab()
return cls(__snake_case , __snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Dict , __snake_case :Union[str, os.PathLike] , *__snake_case :Union[str, Any] , **__snake_case :int ):
'''simple docstring'''
__magic_name__ : Dict =GPTaTokenizer.from_pretrained(__snake_case , *__snake_case , **__snake_case )
return cls.from_tokenizer(__snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Optional[Any] , __snake_case :List[Any] ):
'''simple docstring'''
return cls(**__snake_case )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def A__ ( self :List[Any] , __snake_case :Dict , __snake_case :int = None ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.tf_tokenizer(__snake_case )
__magic_name__ : Tuple =tf.ones_like(__snake_case )
if self.pad_token_id is not None:
# pad the tokens up to max length
__magic_name__ : Tuple =max_length if max_length is not None else self.max_length
if max_length is not None:
__magic_name__ , __magic_name__ : Tuple =pad_model_inputs(
__snake_case , max_seq_length=__snake_case , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase : Union[str, Any] = list[list[float | int]]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Matrix:
'''simple docstring'''
_lowerCamelCase : int = len(_lowerCamelCase )
_lowerCamelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCamelCase : int
_lowerCamelCase : int
_lowerCamelCase : int
_lowerCamelCase : int
_lowerCamelCase : int
_lowerCamelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = matrix[row][col]
_lowerCamelCase : List[str] = vector[row][0]
_lowerCamelCase : int = 0
_lowerCamelCase : List[Any] = 0
while row < size and col < size:
# pivoting
_lowerCamelCase : Tuple = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCamelCase, _lowerCamelCase : Any = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCamelCase : Any = augmented[rowa][col] / augmented[row][col]
_lowerCamelCase : Any = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase_( _lowerCamelCase ) -> Callable[[int], int]:
'''simple docstring'''
_lowerCamelCase : int = len(_lowerCamelCase )
_lowerCamelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCamelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCamelCase : Matrix
_lowerCamelCase : int
_lowerCamelCase : int
_lowerCamelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = (x_val + 1) ** (size - col - 1)
_lowerCamelCase : Optional[Any] = y_val
_lowerCamelCase : str = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase_( _lowerCamelCase = question_function , _lowerCamelCase = 10 ) -> int:
'''simple docstring'''
_lowerCamelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCamelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCamelCase : int = 0
_lowerCamelCase : Callable[[int], int]
_lowerCamelCase : int
for poly in polynomials:
_lowerCamelCase : Tuple = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''') | 46 |
import math
import tensorflow as tf
from packaging import version
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : List[str] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Optional[Any] =tf.cast(math.pi , x.dtype )
__magic_name__ : int =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase , 3 )) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Any =tf.convert_to_tensor(lowerCamelCase )
return x * tf.tanh(tf.math.softplus(lowerCamelCase ) )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[Any] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Union[str, Any] =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Dict =tf.cast(1.7_0_2 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.clip_by_value(_gelu(lowerCamelCase ) , -10 , 10 )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=-1 ):
__magic_name__ , __magic_name__ : List[Any] =tf.split(lowerCamelCase , 2 , axis=lowerCamelCase )
return a * tf.math.sigmoid(lowerCamelCase )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.keras.activations.gelu(lowerCamelCase , approximate=lowerCamelCase )
UpperCAmelCase_ : List[str] = tf.keras.activations.gelu
UpperCAmelCase_ : Dict = approximate_gelu_wrap
else:
UpperCAmelCase_ : Dict = _gelu
UpperCAmelCase_ : str = _gelu_new
UpperCAmelCase_ : Any = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowerCAmelCase_ ( lowerCamelCase ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 21 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 |
from collections.abc import Sequence
def lowerCAmelCase_ ( lowerCamelCase = None ):
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
__magic_name__ : str =nums[0]
for i in range(1 , len(lowerCamelCase ) ):
__magic_name__ : Any =nums[i]
__magic_name__ : Dict =max(lowerCamelCase , ans + num , lowerCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ : List[str] = int(input("Enter number of elements : ").strip())
UpperCAmelCase_ : Tuple = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 21 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :List[Any] = ['image_processor', 'feature_extractor']
snake_case__ :Tuple = 'TvltImageProcessor'
snake_case__ :List[str] = 'TvltFeatureExtractor'
def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
super().__init__(image_processor=__magic_name__ , feature_extractor=__magic_name__ )
lowerCAmelCase__ = image_processor
lowerCAmelCase__ = feature_extractor
def __call__( self : Optional[Any] , __magic_name__ : Optional[Any]=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]=None , __magic_name__ : Any=None , __magic_name__ : List[Any]=False , __magic_name__ : Union[str, Any]=False , *__magic_name__ : Optional[int] , **__magic_name__ : Optional[Any] , ):
"""simple docstring"""
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process." )
lowerCAmelCase__ = None
if images is not None:
lowerCAmelCase__ = self.image_processor(__magic_name__ , mask_pixel=__magic_name__ , *__magic_name__ , **__magic_name__ )
if images_mixed is not None:
lowerCAmelCase__ = self.image_processor(__magic_name__ , is_mixed=__magic_name__ , *__magic_name__ , **__magic_name__ )
if audio is not None:
lowerCAmelCase__ = self.feature_extractor(
__magic_name__ , *__magic_name__ , sampling_rate=__magic_name__ , mask_audio=__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = {}
if audio is not None:
output_dict.update(__magic_name__ )
if images is not None:
output_dict.update(__magic_name__ )
if images_mixed_dict is not None:
output_dict.update(__magic_name__ )
return output_dict
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.image_processor.model_input_names
lowerCAmelCase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 48 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__( self :Union[str, Any] ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __A :
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =sorted(set(self.languages ) ) if self.languages else None
__magic_name__ : Optional[int] =len(self.languages ) if self.languages else None
def __call__( self :List[str] ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =set(self.languages )
if self.languages and set(__snake_case ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__magic_name__ : Any =[]
for lang, text in translation_dict.items():
if isinstance(__snake_case , __snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__magic_name__ , __magic_name__ : List[str] =zip(*sorted(__snake_case ) )
return {"language": languages, "translation": translations}
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 21 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowercase : int = logging.get_logger(__name__)
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : Union[str, Any] , *_lowercase : int , **_lowercase : Tuple ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 49 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase_ : Dict = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase_ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase_ : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def A__ ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def A__ ( self :Tuple , __snake_case :str , __snake_case :Tuple , __snake_case :List[str]=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__snake_case , __snake_case , sample_weight=__snake_case ) ),
}
| 21 | 0 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = (DDIMParallelScheduler,)
_UpperCamelCase = (('eta', 0.0), ('num_inference_steps', 50))
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
lowerCamelCase__ = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**_lowerCAmelCase )
return config
def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(**_lowerCAmelCase )
lowerCamelCase__ = scheduler_class(**_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = 10, 0.0
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for t in scheduler.timesteps:
lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ).prev_sample
return sample
def UpperCamelCase_ ( self ):
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowerCAmelCase )
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(steps_offset=1 )
lowerCamelCase__ = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def UpperCamelCase_ ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCAmelCase ,beta_end=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
self.check_over_configs(thresholding=_lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCAmelCase ,prediction_type=_lowerCAmelCase ,sample_max_value=_lowerCAmelCase ,)
def UpperCamelCase_ ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 5_00] ):
self.check_over_forward(time_step=_lowerCAmelCase ,num_inference_steps=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_lowerCAmelCase ,eta=_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 ,4_00 ) - 0.1_4771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 ,9_60 ) - 0.3_2460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ,4_86 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ,9_98 ) - 0.02 ) ) < 1E-5
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = 10, 0.0
scheduler.set_timesteps(_lowerCAmelCase )
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
lowerCamelCase__ = self.dummy_sample_deter + 0.1
lowerCamelCase__ = self.dummy_sample_deter - 0.1
lowerCamelCase__ = samplea.shape[0]
lowerCamelCase__ = torch.stack([samplea, samplea, samplea] ,dim=0 )
lowerCamelCase__ = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 ,_lowerCAmelCase )
lowerCamelCase__ = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
lowerCamelCase__ = scheduler.batch_step_no_noise(_lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,_lowerCAmelCase )
lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.full_loop()
lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.22_3967 ) < 1E-3
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.full_loop(prediction_type="""v_prediction""" )
lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def UpperCamelCase_ ( self ):
# We specify different beta, so that the first alpha is 0.99
lowerCamelCase__ = self.full_loop(set_alpha_to_one=_lowerCAmelCase ,beta_start=0.01 )
lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def UpperCamelCase_ ( self ):
# We specify different beta, so that the first alpha is 0.99
lowerCamelCase__ = self.full_loop(set_alpha_to_one=_lowerCAmelCase ,beta_start=0.01 )
lowerCamelCase__ = torch.sum(torch.abs(_lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 50 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =OmegaConf.load(lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(lowerCamelCase ) ) )
return config
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
if conf_path is None:
__magic_name__ : List[str] ="""./model_checkpoints/vqgan_only.yaml"""
__magic_name__ : Dict =load_config(lowerCamelCase , display=lowerCamelCase )
__magic_name__ : Tuple =VQModel(**config.model.params )
if ckpt_path is None:
__magic_name__ : Optional[Any] ="""./model_checkpoints/vqgan_only.pt"""
__magic_name__ : Tuple =torch.load(lowerCamelCase , map_location=lowerCamelCase )
if ".ckpt" in ckpt_path:
__magic_name__ : Any =sd["""state_dict"""]
model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
model.to(lowerCamelCase )
del sd
return model
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] =model.encode(lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
__magic_name__ : List[Any] =model.decode(lowerCamelCase )
return xrec
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ , __magic_name__ : Optional[int] =string.rsplit(""".""" , 1 )
if reload:
__magic_name__ : Optional[int] =importlib.import_module(lowerCamelCase )
importlib.reload(lowerCamelCase )
return getattr(importlib.import_module(lowerCamelCase , package=lowerCamelCase ) , cls )
def lowerCAmelCase_ ( lowerCamelCase ):
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=True ):
__magic_name__ : str =instantiate_from_config(lowerCamelCase )
if sd is not None:
model.load_state_dict(lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# load the specified checkpoint
if ckpt:
__magic_name__ : str =torch.load(lowerCamelCase , map_location="""cpu""" )
__magic_name__ : Any =pl_sd["""global_step"""]
print(F"loaded model from global step {global_step}." )
else:
__magic_name__ : List[Any] ={"""state_dict""": None}
__magic_name__ : Optional[Any] =None
__magic_name__ : Tuple =load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=lowerCamelCase , eval_mode=lowerCamelCase )["""model"""]
return model, global_step
| 21 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
a__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase_ )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Dict , *a__ : Optional[Any] , **a__ : List[Any] ):
super().__init__(*a__ , **a__ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def __snake_case ( self : int , a__ : Union[str, Any]=None ):
UpperCAmelCase = {}
if top_k is not None:
UpperCAmelCase = top_k
return {}, {}, postprocess_params
def __call__( self : Any , a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a__ : List[str] ):
return super().__call__(a__ , **a__ )
def __snake_case ( self : Union[str, Any] , a__ : Dict ):
UpperCAmelCase = load_image(a__ )
UpperCAmelCase = self.image_processor(images=a__ , return_tensors=self.framework )
return model_inputs
def __snake_case ( self : str , a__ : Optional[Any] ):
UpperCAmelCase = self.model(**a__ )
return model_outputs
def __snake_case ( self : Union[str, Any] , a__ : Tuple , a__ : Union[str, Any]=5 ):
if top_k > self.model.config.num_labels:
UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase, UpperCAmelCase = probs.topk(a__ )
elif self.framework == "tf":
UpperCAmelCase = stable_softmax(model_outputs.logits , axis=-1 )[0]
UpperCAmelCase = tf.math.top_k(a__ , k=a__ )
UpperCAmelCase, UpperCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase = scores.tolist()
UpperCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a__ , a__ )]
| 51 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
def A__ ( self :Tuple ):
'''simple docstring'''
debug_launcher(test_script.main )
def A__ ( self :Dict ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 21 | 0 |
"""simple docstring"""
import os
import sys
import unittest
A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A = os.path.join(git_repo_path, '''src''', '''transformers''')
A = '''
{0} = None
'''
A = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
A = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[Any] = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(_UpperCAmelCase )
__a : Optional[int] = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(_UpperCAmelCase , '''tokenizers''' )
__a : List[Any] = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(_UpperCAmelCase , '''tensorflow_text''' )
__a : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(_UpperCAmelCase , '''sentencepiece_and_tokenizers''' )
__a : str = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(_UpperCAmelCase , '''sentencepiece_and_tensorflow_text''' )
__a : Union[str, Any] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(_UpperCAmelCase , '''sentencepiece_and_tokenizers_and_vision''' )
def _lowerCamelCase ( self ):
__a : str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , _UpperCAmelCase )
self.assertIn('''tensorflow_text''' , _UpperCAmelCase )
self.assertIn('''sentencepiece_and_tokenizers''' , _UpperCAmelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def _lowerCamelCase ( self ):
__a : str = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(_UpperCAmelCase , '''\nCONSTANT = None\n''' )
__a : str = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
_UpperCAmelCase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__a : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__a : List[str] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__a : Any = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , _UpperCAmelCase ) | 52 |
UpperCAmelCase_ : Tuple = 0 # The first color of the flag.
UpperCAmelCase_ : Any = 1 # The second color of the flag.
UpperCAmelCase_ : str = 2 # The third color of the flag.
UpperCAmelCase_ : Tuple = (red, white, blue)
def lowerCAmelCase_ ( lowerCamelCase ):
if not sequence:
return []
if len(lowerCamelCase ) == 1:
return list(lowerCamelCase )
__magic_name__ : int =0
__magic_name__ : str =len(lowerCamelCase ) - 1
__magic_name__ : Optional[Any] =0
while mid <= high:
if sequence[mid] == colors[0]:
__magic_name__ , __magic_name__ : Tuple =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
__magic_name__ , __magic_name__ : Optional[Any] =sequence[high], sequence[mid]
high -= 1
else:
__magic_name__ : Optional[int] =F"The elements inside the sequence must contains only {colors} values"
raise ValueError(lowerCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[Any] = input("Enter numbers separated by commas:\n").strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 21 | 0 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : int, lowerCAmelCase_ : int, lowerCAmelCase_ : int, lowerCAmelCase_ : int, lowerCAmelCase_ : int ):
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
__lowerCAmelCase = ksize + 1
__lowerCAmelCase = np.zeros((ksize, ksize), dtype=np.floataa )
# each value
for y in range(lowerCAmelCase_ ):
for x in range(lowerCAmelCase_ ):
# distance from center
__lowerCAmelCase = x - ksize // 2
__lowerCAmelCase = y - ksize // 2
# degree to radiant
__lowerCAmelCase = theta / 180 * np.pi
__lowerCAmelCase = np.cos(_theta )
__lowerCAmelCase = np.sin(_theta )
# get kernel x
__lowerCAmelCase = cos_theta * px + sin_theta * py
# get kernel y
__lowerCAmelCase = -sin_theta * px + cos_theta * py
# fill kernel
__lowerCAmelCase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case : Any = imread('../image_data/lena.jpg')
# turn image in gray scale value
_snake_case : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case : Dict = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case : Any = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case : int = out / out.max() * 255
_snake_case : int = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 53 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = 1
@register_to_config
def __init__( self :Any , __snake_case :Tuple=20_00 , __snake_case :Optional[Any]=0.1 , __snake_case :Any=20 , __snake_case :Optional[int]=1E-3 ):
'''simple docstring'''
__magic_name__ : Dict =None
__magic_name__ : List[str] =None
__magic_name__ : str =None
def A__ ( self :Dict , __snake_case :Optional[int] , __snake_case :Union[str, torch.device] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =torch.linspace(1 , self.config.sampling_eps , __snake_case , device=__snake_case )
def A__ ( self :List[str] , __snake_case :List[str] , __snake_case :int , __snake_case :int , __snake_case :List[str]=None ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__magic_name__ : int =(
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__magic_name__ : Optional[int] =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__magic_name__ : str =std.flatten()
while len(std.shape ) < len(score.shape ):
__magic_name__ : List[str] =std.unsqueeze(-1 )
__magic_name__ : Union[str, Any] =-score / std
# compute
__magic_name__ : Tuple =-1.0 / len(self.timesteps )
__magic_name__ : int =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__magic_name__ : Dict =beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__magic_name__ : Any =beta_t.unsqueeze(-1 )
__magic_name__ : Dict =-0.5 * beta_t * x
__magic_name__ : Optional[int] =torch.sqrt(__snake_case )
__magic_name__ : int =drift - diffusion**2 * score
__magic_name__ : List[str] =x + drift * dt
# add noise
__magic_name__ : Optional[int] =randn_tensor(x.shape , layout=x.layout , generator=__snake_case , device=x.device , dtype=x.dtype )
__magic_name__ : Optional[Any] =x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 21 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowercase : int =logging.get_logger(__name__)
__lowercase : List[str] ={
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A ( __lowercase ):
_snake_case ='''mctct'''
def __init__( self: Optional[Any] , _lowerCAmelCase: Union[str, Any]=8065 , _lowerCAmelCase: int=1536 , _lowerCAmelCase: int=36 , _lowerCAmelCase: Union[str, Any]=6144 , _lowerCAmelCase: Optional[Any]=4 , _lowerCAmelCase: int=384 , _lowerCAmelCase: List[str]=920 , _lowerCAmelCase: Tuple=1e-5 , _lowerCAmelCase: Optional[int]=0.3 , _lowerCAmelCase: Optional[int]="relu" , _lowerCAmelCase: List[str]=0.02 , _lowerCAmelCase: Tuple=0.3 , _lowerCAmelCase: str=0.3 , _lowerCAmelCase: Optional[Any]=1 , _lowerCAmelCase: Optional[int]=0 , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: Optional[Any]=1 , _lowerCAmelCase: Optional[Any]=0.3 , _lowerCAmelCase: Tuple=1 , _lowerCAmelCase: List[str]=(7,) , _lowerCAmelCase: int=(3,) , _lowerCAmelCase: Optional[Any]=80 , _lowerCAmelCase: str=1 , _lowerCAmelCase: List[str]=None , _lowerCAmelCase: Optional[Any]="sum" , _lowerCAmelCase: Any=False , **_lowerCAmelCase: Union[str, Any] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =attention_head_dim
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =layer_norm_eps
UpperCAmelCase_ =layerdrop
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =pad_token_id
UpperCAmelCase_ =bos_token_id
UpperCAmelCase_ =eos_token_id
UpperCAmelCase_ =conv_glu_dim
UpperCAmelCase_ =conv_dropout
UpperCAmelCase_ =num_conv_layers
UpperCAmelCase_ =input_feat_per_channel
UpperCAmelCase_ =input_channels
UpperCAmelCase_ =conv_channels
UpperCAmelCase_ =ctc_loss_reduction
UpperCAmelCase_ =ctc_zero_infinity
# prevents config testing fail with exporting to json
UpperCAmelCase_ =list(_lowerCAmelCase )
UpperCAmelCase_ =list(_lowerCAmelCase )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
F'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '
F'`config.num_conv_layers = {self.num_conv_layers}`.' )
| 54 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__( self :List[str] , __snake_case :int , __snake_case :int , __snake_case :float , **__snake_case :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =feature_size
__magic_name__ : Union[str, Any] =sampling_rate
__magic_name__ : List[Any] =padding_value
__magic_name__ : List[str] =kwargs.pop("""padding_side""" , """right""" )
__magic_name__ : Tuple =kwargs.pop("""return_attention_mask""" , __snake_case )
super().__init__(**__snake_case )
def A__ ( self :Any , __snake_case :Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case :Union[bool, str, PaddingStrategy] = True , __snake_case :Optional[int] = None , __snake_case :bool = False , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__magic_name__ : Union[str, Any] ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
__magic_name__ : int =processed_features[self.model_input_names[0]]
__magic_name__ : Union[str, Any] =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
__magic_name__ : List[str] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__magic_name__ : Optional[int] =required_input[0]
if isinstance(__snake_case , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__magic_name__ : Optional[Any] =0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
__magic_name__ : List[str] =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
__magic_name__ : int ="""tf"""
elif is_torch_tensor(__snake_case ):
__magic_name__ : str ="""pt"""
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
__magic_name__ : List[Any] ="""np"""
else:
raise ValueError(
f"type of {first_element} unknown: {type(__snake_case )}. "
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__magic_name__ : List[str] =to_numpy(__snake_case )
else:
__magic_name__ : str =[to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
__magic_name__ : Dict =self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
__magic_name__ : Optional[Any] =processed_features[self.model_input_names[0]]
__magic_name__ : Dict =len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__magic_name__ : Optional[int] =[]
for i in range(__snake_case ):
__magic_name__ : Any ={k: v[i] for k, v in processed_features.items()}
# truncation
__magic_name__ : List[str] =self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__magic_name__ : Optional[int] =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__magic_name__ : Tuple =PaddingStrategy.MAX_LENGTH
__magic_name__ : str ={}
for i in range(__snake_case ):
# padding
__magic_name__ : List[str] =self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
__magic_name__ : Dict =[]
if value.dtype is np.dtype(np.floataa ):
__magic_name__ : Optional[int] =value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def A__ ( self :Any , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
__magic_name__ : Dict =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__magic_name__ : Any =len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : Dict =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : List[Any] =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__magic_name__ : int =np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
__magic_name__ : List[Any] =max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__magic_name__ : Tuple =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__magic_name__ : str =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__magic_name__ : Optional[int] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__magic_name__ : List[Any] =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def A__ ( self :Optional[Any] , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__magic_name__ : Union[str, Any] =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : List[str] =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : Any =len(__snake_case ) > max_length
if needs_to_be_truncated:
__magic_name__ : List[Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__magic_name__ : List[str] =processed_features["""attention_mask"""][:max_length]
return processed_features
def A__ ( self :List[Any] , __snake_case :str=False , __snake_case :Optional[int]=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
__magic_name__ : Union[str, Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
__magic_name__ : Optional[int] =PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
__magic_name__ : Any =padding
else:
__magic_name__ : Any =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 21 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "upernet"
def __init__( self : List[Any] ,A : List[Any]=None ,A : Optional[Any]=5_12 ,A : Union[str, Any]=0.02 ,A : Any=[1, 2, 3, 6] ,A : Dict=True ,A : List[Any]=0.4 ,A : Dict=3_84 ,A : List[str]=2_56 ,A : List[str]=1 ,A : Optional[Any]=False ,A : List[Any]=2_55 ,**A : str ,):
super().__init__(**A )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__A = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(A ,A ):
__A = backbone_config.get("model_type" )
__A = CONFIG_MAPPING[backbone_model_type]
__A = config_class.from_dict(A )
__A = backbone_config
__A = hidden_size
__A = initializer_range
__A = pool_scales
__A = use_auxiliary_head
__A = auxiliary_loss_weight
__A = auxiliary_in_channels
__A = auxiliary_channels
__A = auxiliary_num_convs
__A = auxiliary_concat_input
__A = loss_ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = copy.deepcopy(self.__dict__ )
__A = self.backbone_config.to_dict()
__A = self.__class__.model_type
return output
| 55 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __A ( nn.Module ):
def __init__( self :List[Any] ):
'''simple docstring'''
super().__init__()
__magic_name__ : Tuple =nn.Linear(3 , 4 )
__magic_name__ : Union[str, Any] =nn.BatchNormad(4 )
__magic_name__ : List[str] =nn.Linear(4 , 5 )
def A__ ( self :Dict , __snake_case :Tuple ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__snake_case ) ) )
class __A ( UpperCamelCase__ ):
def A__ ( self :Any , __snake_case :Optional[Any] , *__snake_case :List[Any] , **__snake_case :Any ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class __A ( UpperCamelCase__ ):
def A__ ( self :List[str] , __snake_case :Tuple , __snake_case :Union[str, Any] ):
'''simple docstring'''
return output + 1
class __A ( unittest.TestCase ):
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : Tuple =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
self.assertEqual(test_model._hf_hook , __snake_case )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : List[str] =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
add_hook_to_module(__snake_case , __snake_case , append=__snake_case )
self.assertEqual(isinstance(test_model._hf_hook , __snake_case ) , __snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
__magic_name__ : Any =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(x + 1 )
__magic_name__ : Optional[Any] =test_model(x + 2 )
__magic_name__ : int =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : int =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : str =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : List[str] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Optional[Any] =SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
assert torch.allclose(__snake_case , __snake_case , atol=1E-5 )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
__magic_name__ : Dict =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(__snake_case )
__magic_name__ : Dict =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Optional[int] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Union[str, Any] =SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
assert torch.allclose(__snake_case , output + 2 , atol=1E-5 )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Tuple =ModelForTest()
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Dict =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__magic_name__ : Any =True
__magic_name__ : Any =test_model(__snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[Any] =model(__snake_case )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__snake_case , AlignDevicesHook(io_same_device=__snake_case ) )
__magic_name__ : int =torch.randn(2 , 3 ).to(0 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , torch.device(0 ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : int ={"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[int] =torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : Union[str, Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__magic_name__ : Tuple ={
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Tuple =torch.randn(2 , 3 )
__magic_name__ : int =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : str =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case , offload_buffers=__snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Optional[int] =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Dict =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : List[str] =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Any =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() , offload_buffers=__snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 21 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
_a : List[Any] = logging.get_logger(__name__)
# General docstring
_a : Union[str, Any] = "MobileNetV1Config"
# Base docstring
_a : int = "google/mobilenet_v1_1.0_224"
_a : Any = [1, 1_024, 7, 7]
# Image classification docstring
_a : Any = "google/mobilenet_v1_1.0_224"
_a : Tuple = "tabby, tabby cat"
_a : Tuple = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _a (lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : Tuple=None ) -> Optional[int]:
"""simple docstring"""
__snake_case = {}
if isinstance(lowercase__ , lowercase__ ):
__snake_case = model.mobilenet_va
else:
__snake_case = model
__snake_case = 'MobilenetV1/Conv2d_0/'
__snake_case = backbone.conv_stem.convolution.weight
__snake_case = backbone.conv_stem.normalization.bias
__snake_case = backbone.conv_stem.normalization.weight
__snake_case = backbone.conv_stem.normalization.running_mean
__snake_case = backbone.conv_stem.normalization.running_var
for i in range(1_3 ):
__snake_case = i + 1
__snake_case = i * 2
__snake_case = backbone.layer[pt_index]
__snake_case = f'MobilenetV1/Conv2d_{tf_index}_depthwise/'
__snake_case = pointer.convolution.weight
__snake_case = pointer.normalization.bias
__snake_case = pointer.normalization.weight
__snake_case = pointer.normalization.running_mean
__snake_case = pointer.normalization.running_var
__snake_case = backbone.layer[pt_index + 1]
__snake_case = f'MobilenetV1/Conv2d_{tf_index}_pointwise/'
__snake_case = pointer.convolution.weight
__snake_case = pointer.normalization.bias
__snake_case = pointer.normalization.weight
__snake_case = pointer.normalization.running_mean
__snake_case = pointer.normalization.running_var
if isinstance(lowercase__ , lowercase__ ):
__snake_case = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
__snake_case = model.classifier.weight
__snake_case = model.classifier.bias
return tf_to_pt_map
def _a (lowercase__ : List[str] , lowercase__ : Optional[Any] , lowercase__ : Tuple ) -> str:
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
__snake_case = tf.train.list_variables(lowercase__ )
__snake_case = {}
for name, shape in init_vars:
logger.info(f'Loading TF weight {name} with shape {shape}' )
__snake_case = tf.train.load_variable(lowercase__ , lowercase__ )
__snake_case = array
# Build TF to PyTorch weights loading map
__snake_case = _build_tf_to_pytorch_map(lowercase__ , lowercase__ , lowercase__ )
for name, pointer in tf_to_pt_map.items():
logger.info(f'Importing {name}' )
if name not in tf_weights:
logger.info(f'{name} not in tf pre-trained weights, skipping' )
continue
__snake_case = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
__snake_case = np.transpose(lowercase__ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
__snake_case = array.squeeze().transpose()
else:
__snake_case = np.transpose(lowercase__ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(f'Initialize PyTorch weight {name} {array.shape}' )
__snake_case = torch.from_numpy(lowercase__ )
tf_weights.pop(lowercase__ , lowercase__ )
tf_weights.pop(name + '/RMSProp' , lowercase__ )
tf_weights.pop(name + '/RMSProp_1' , lowercase__ )
tf_weights.pop(name + '/ExponentialMovingAverage' , lowercase__ )
logger.info(f'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
def _a (lowercase__ : torch.Tensor , lowercase__ : nn.Convad ) -> torch.Tensor:
"""simple docstring"""
__snake_case , __snake_case = features.shape[-2:]
__snake_case , __snake_case = conv_layer.stride
__snake_case , __snake_case = conv_layer.kernel_size
if in_height % stride_height == 0:
__snake_case = max(kernel_height - stride_height , 0 )
else:
__snake_case = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__snake_case = max(kernel_width - stride_width , 0 )
else:
__snake_case = max(kernel_width - (in_width % stride_width) , 0 )
__snake_case = pad_along_width // 2
__snake_case = pad_along_width - pad_left
__snake_case = pad_along_height // 2
__snake_case = pad_along_height - pad_top
__snake_case = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowercase__ , lowercase__ , 'constant' , 0.0 )
class _lowercase ( nn.Module ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : MobileNetVaConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] = 1 , SCREAMING_SNAKE_CASE_ : Optional[int] = 1 , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : Optional[bool or str] = True , ) -> None:
super().__init__()
__snake_case = config
if in_channels % groups != 0:
raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.' )
__snake_case = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__snake_case = nn.Convad(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ , padding_mode='zeros' , )
if use_normalization:
__snake_case = nn.BatchNormad(
num_features=SCREAMING_SNAKE_CASE_ , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=SCREAMING_SNAKE_CASE_ , track_running_stats=SCREAMING_SNAKE_CASE_ , )
else:
__snake_case = None
if use_activation:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = ACTaFN[use_activation]
elif isinstance(config.hidden_act , SCREAMING_SNAKE_CASE_ ):
__snake_case = ACTaFN[config.hidden_act]
else:
__snake_case = config.hidden_act
else:
__snake_case = None
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : torch.Tensor ) -> torch.Tensor:
if self.config.tf_padding:
__snake_case = apply_tf_padding(SCREAMING_SNAKE_CASE_ , self.convolution )
__snake_case = self.convolution(SCREAMING_SNAKE_CASE_ )
if self.normalization is not None:
__snake_case = self.normalization(SCREAMING_SNAKE_CASE_ )
if self.activation is not None:
__snake_case = self.activation(SCREAMING_SNAKE_CASE_ )
return features
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Tuple = MobileNetVaConfig
_SCREAMING_SNAKE_CASE : Optional[Any] = load_tf_weights_in_mobilenet_va
_SCREAMING_SNAKE_CASE : Any = "mobilenet_v1"
_SCREAMING_SNAKE_CASE : Optional[Any] = "pixel_values"
_SCREAMING_SNAKE_CASE : List[Any] = False
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[nn.Linear, nn.Convad] ) -> None:
if isinstance(SCREAMING_SNAKE_CASE_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
_a : Optional[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_a : str = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , __lowercase , )
class _lowercase ( __lowercase ):
def __init__( self : str , SCREAMING_SNAKE_CASE_ : MobileNetVaConfig , SCREAMING_SNAKE_CASE_ : bool = True ) -> Any:
super().__init__(SCREAMING_SNAKE_CASE_ )
__snake_case = config
__snake_case = 32
__snake_case = max(int(depth * config.depth_multiplier ) , config.min_depth )
__snake_case = MobileNetVaConvLayer(
SCREAMING_SNAKE_CASE_ , in_channels=config.num_channels , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=3 , stride=2 , )
__snake_case = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__snake_case = nn.ModuleList()
for i in range(13 ):
__snake_case = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__snake_case = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=3 , stride=strides[i] , groups=SCREAMING_SNAKE_CASE_ , ) )
self.layer.append(
MobileNetVaConvLayer(
SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=1 , ) )
__snake_case = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int ) -> Dict:
raise NotImplementedError
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__snake_case = self.conv_stem(SCREAMING_SNAKE_CASE_ )
__snake_case = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__snake_case = layer_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
__snake_case = all_hidden_states + (hidden_states,)
__snake_case = hidden_states
if self.pooler is not None:
__snake_case = torch.flatten(self.pooler(SCREAMING_SNAKE_CASE_ ) , start_dim=1 )
else:
__snake_case = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __lowercase , )
class _lowercase ( __lowercase ):
def __init__( self : int , SCREAMING_SNAKE_CASE_ : MobileNetVaConfig ) -> None:
super().__init__(SCREAMING_SNAKE_CASE_ )
__snake_case = config.num_labels
__snake_case = MobileNetVaModel(SCREAMING_SNAKE_CASE_ )
__snake_case = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__snake_case = nn.Dropout(config.classifier_dropout_prob , inplace=SCREAMING_SNAKE_CASE_ )
__snake_case = nn.Linear(SCREAMING_SNAKE_CASE_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.mobilenet_va(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
__snake_case = outputs.pooler_output if return_dict else outputs[1]
__snake_case = self.classifier(self.dropout(SCREAMING_SNAKE_CASE_ ) )
__snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case = 'single_label_classification'
else:
__snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
__snake_case = MSELoss()
if self.num_labels == 1:
__snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__snake_case = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif self.config.problem_type == "single_label_classification":
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__snake_case = BCEWithLogitsLoss()
__snake_case = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not return_dict:
__snake_case = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states , )
| 56 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = KandinskyInpaintPipeline
UpperCamelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCamelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return 1_00
@property
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : str =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__magic_name__ : Tuple =MultilingualCLIP(__snake_case )
__magic_name__ : Optional[int] =text_encoder.eval()
return text_encoder
@property
def A__ ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Optional[Any] ={
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ : Union[str, Any] =UNetaDConditionModel(**__snake_case )
return model
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.dummy_text_encoder
__magic_name__ : Optional[Any] =self.dummy_tokenizer
__magic_name__ : Optional[Any] =self.dummy_unet
__magic_name__ : Tuple =self.dummy_movq
__magic_name__ : List[str] =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__snake_case , )
__magic_name__ : str ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self :str , __snake_case :Optional[Any] , __snake_case :int=0 ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : Dict =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
__magic_name__ : str =floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ : str =Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
__magic_name__ : Dict =np.ones((64, 64) , dtype=np.floataa )
__magic_name__ : Any =0
if str(__snake_case ).startswith("""mps""" ):
__magic_name__ : Dict =torch.manual_seed(__snake_case )
else:
__magic_name__ : Tuple =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__ : List[Any] ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple ="""cpu"""
__magic_name__ : List[Any] =self.get_dummy_components()
__magic_name__ : Union[str, Any] =self.pipeline_class(**__snake_case )
__magic_name__ : Tuple =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Tuple =pipe(**self.get_dummy_inputs(__snake_case ) )
__magic_name__ : List[Any] =output.images
__magic_name__ : Any =pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__magic_name__ : int =image[0, -3:, -3:, -1]
__magic_name__ : str =image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Optional[Any] =np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def A__ ( self :Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def A__ ( self :List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
__magic_name__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__magic_name__ : List[Any] =np.ones((7_68, 7_68) , dtype=np.floataa )
__magic_name__ : Any =0
__magic_name__ : int ="""a hat"""
__magic_name__ : int =KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__magic_name__ : Dict =KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
__magic_name__ : int =pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Union[str, Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ , __magic_name__ : Dict =pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__magic_name__ : Optional[Any] =pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
__magic_name__ : Optional[int] =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 21 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : int = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : int ='''umt5'''
a : Optional[Any] =['''past_key_values''']
def __init__( self , _lowerCamelCase=2_5_0_1_1_2 , _lowerCamelCase=5_1_2 , _lowerCamelCase=6_4 , _lowerCamelCase=1_0_2_4 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase=6 , _lowerCamelCase=3_2 , _lowerCamelCase=1_2_8 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-6 , _lowerCamelCase=1.0 , _lowerCamelCase="gated-gelu" , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="T5Tokenizer" , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=0 , **_lowerCamelCase , ):
super().__init__(
is_encoder_decoder=_lowerCamelCase , tokenizer_class=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , pad_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
UpperCamelCase_: str = vocab_size
UpperCamelCase_: Any = d_model
UpperCamelCase_: Any = d_kv
UpperCamelCase_: Optional[Any] = d_ff
UpperCamelCase_: str = num_layers
UpperCamelCase_: Tuple = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCamelCase_: Optional[Any] = num_heads
UpperCamelCase_: List[str] = relative_attention_num_buckets
UpperCamelCase_: Union[str, Any] = relative_attention_max_distance
UpperCamelCase_: List[str] = dropout_rate
UpperCamelCase_: str = layer_norm_epsilon
UpperCamelCase_: Dict = initializer_factor
UpperCamelCase_: Optional[int] = feed_forward_proj
UpperCamelCase_: List[Any] = use_cache
UpperCamelCase_: Dict = self.feed_forward_proj.split('-' )
UpperCamelCase_: List[str] = act_info[-1]
UpperCamelCase_: str = act_info[0] == 'gated'
if len(_lowerCamelCase ) > 1 and act_info[0] != "gated" or len(_lowerCamelCase ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
UpperCamelCase_: int = 'gelu_new'
@property
def _a ( self ):
return self.d_model
@property
def _a ( self ):
return self.num_heads
@property
def _a ( self ):
return self.num_layers
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _a ( self ):
UpperCamelCase_: Dict = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
UpperCamelCase_: Tuple = 'past_encoder_sequence + sequence'
UpperCamelCase_: Any = {0: 'batch'}
UpperCamelCase_: Optional[int] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
UpperCamelCase_: Tuple = {0: 'batch', 1: 'decoder_sequence'}
UpperCamelCase_: Any = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _a ( self ):
return 1_3
@property
def _a ( self ):
return 5e-4 | 57 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A :
def __init__( self :int , __snake_case :List[Any] , __snake_case :List[Any]=2 , __snake_case :Dict=True , __snake_case :Tuple=False , __snake_case :List[str]=10 , __snake_case :List[str]=3 , __snake_case :Union[str, Any]=32 * 8 , __snake_case :Optional[int]=32 * 8 , __snake_case :Any=4 , __snake_case :Union[str, Any]=64 , ):
'''simple docstring'''
__magic_name__ : Optional[int] =parent
__magic_name__ : List[Any] =batch_size
__magic_name__ : List[str] =is_training
__magic_name__ : List[str] =use_auxiliary_loss
__magic_name__ : Union[str, Any] =num_queries
__magic_name__ : str =num_channels
__magic_name__ : Union[str, Any] =min_size
__magic_name__ : Union[str, Any] =max_size
__magic_name__ : Optional[int] =num_labels
__magic_name__ : Tuple =hidden_dim
__magic_name__ : Any =hidden_dim
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
__magic_name__ : List[Any] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
__magic_name__ : List[str] =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
__magic_name__ : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
__magic_name__ : str =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Dict =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__magic_name__ : str =self.num_queries
__magic_name__ : Dict =self.num_labels
__magic_name__ : int =[1, 1, 1, 1]
__magic_name__ : List[str] =self.num_channels
__magic_name__ : str =64
__magic_name__ : List[str] =1_28
__magic_name__ : Optional[Any] =self.hidden_dim
__magic_name__ : Tuple =self.hidden_dim
__magic_name__ : Optional[int] =self.hidden_dim
return config
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =self.prepare_config_and_inputs()
__magic_name__ : Optional[Any] ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self :Union[str, Any] , __snake_case :Tuple , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : int =output.encoder_hidden_states
__magic_name__ : List[str] =output.pixel_decoder_hidden_states
__magic_name__ : int =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , config.decoder_layers )
def A__ ( self :List[Any] , __snake_case :Optional[Any] , __snake_case :int , __snake_case :str , __snake_case :str=False ):
'''simple docstring'''
with torch.no_grad():
__magic_name__ : List[str] =MaskaFormerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Union[str, Any] =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : int =model(__snake_case , output_hidden_states=__snake_case )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__snake_case , __snake_case )
def A__ ( self :Optional[Any] , __snake_case :List[str] , __snake_case :List[Any] , __snake_case :int , __snake_case :Any , __snake_case :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : str =MaskaFormerForUniversalSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
def comm_check_on_output(__snake_case :List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__magic_name__ : int =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : List[str] =model(__snake_case )
comm_check_on_output(__snake_case )
__magic_name__ : Any =model(
pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
comm_check_on_output(__snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Any =MaskaFormerModelTester(self )
__magic_name__ : Union[str, Any] =ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def A__ ( self :Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__snake_case )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple =model_class(__snake_case )
__magic_name__ : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple =[*signature.parameters.keys()]
__magic_name__ : Optional[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__magic_name__ : int =MaskaFormerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =(self.model_tester.min_size,) * 2
__magic_name__ : Union[str, Any] ={
"""pixel_values""": torch.randn((2, 3, *size) , device=__snake_case ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__snake_case ),
"""class_labels""": torch.zeros(2 , 10 , device=__snake_case ).long(),
}
__magic_name__ : Optional[Any] =self.model_tester.get_config()
__magic_name__ : Dict =MaskaFormerForUniversalSegmentation(__snake_case ).to(__snake_case )
__magic_name__ : Any =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] =model_class(__snake_case ).to(__snake_case )
__magic_name__ : Optional[int] =model(**__snake_case , output_attentions=__snake_case )
self.assertTrue(outputs.attentions is not None )
def A__ ( self :int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__magic_name__ : List[Any] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Dict =model_class(__snake_case )
model.to(__snake_case )
model.train()
__magic_name__ : Optional[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss
loss.backward()
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Tuple =True
__magic_name__ : Optional[int] =True
__magic_name__ : int =model_class(__snake_case ).to(__snake_case )
model.train()
__magic_name__ : List[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
__magic_name__ : Optional[int] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__magic_name__ : Optional[int] =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ : Dict = 1e-4
def lowerCAmelCase_ ( ):
__magic_name__ : Dict =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __A ( unittest.TestCase ):
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__snake_case )
__magic_name__ : int =self.default_image_processor
__magic_name__ : List[Any] =prepare_img()
__magic_name__ : Any =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Dict =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : List[str] =model(**__snake_case )
__magic_name__ : Any =torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Dict =torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Any =torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Optional[int] =self.default_image_processor
__magic_name__ : Tuple =prepare_img()
__magic_name__ : List[Any] =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Union[str, Any] =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : str =model(**__snake_case )
# masks_queries_logits
__magic_name__ : List[Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__magic_name__ : List[Any] =[
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__magic_name__ : Dict =torch.tensor(__snake_case ).to(__snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
# class_queries_logits
__magic_name__ : Any =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__magic_name__ : int =torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Any =self.default_image_processor
__magic_name__ : Union[str, Any] =image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , )
__magic_name__ : str =inputs["""pixel_values"""].to(__snake_case )
__magic_name__ : Tuple =[el.to(__snake_case ) for el in inputs["""mask_labels"""]]
__magic_name__ : Union[str, Any] =[el.to(__snake_case ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__magic_name__ : Dict =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
| 21 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0_0_0 ):
'''simple docstring'''
snake_case_ : List[str] = 2**power
snake_case_ : List[Any] = 0
while n:
snake_case_ , snake_case_ : str = r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """segformer"""
def __init__( self :List[str] , __snake_case :str=3 , __snake_case :Optional[Any]=4 , __snake_case :List[Any]=[2, 2, 2, 2] , __snake_case :Dict=[8, 4, 2, 1] , __snake_case :Optional[int]=[32, 64, 1_60, 2_56] , __snake_case :Union[str, Any]=[7, 3, 3, 3] , __snake_case :Optional[Any]=[4, 2, 2, 2] , __snake_case :Tuple=[1, 2, 5, 8] , __snake_case :List[Any]=[4, 4, 4, 4] , __snake_case :Optional[Any]="gelu" , __snake_case :Tuple=0.0 , __snake_case :Dict=0.0 , __snake_case :Optional[int]=0.1 , __snake_case :Optional[int]=0.02 , __snake_case :Tuple=0.1 , __snake_case :Union[str, Any]=1E-6 , __snake_case :int=2_56 , __snake_case :Optional[int]=2_55 , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __snake_case , )
__magic_name__ : Dict =num_channels
__magic_name__ : str =num_encoder_blocks
__magic_name__ : List[Any] =depths
__magic_name__ : Optional[Any] =sr_ratios
__magic_name__ : List[str] =hidden_sizes
__magic_name__ : List[str] =patch_sizes
__magic_name__ : Any =strides
__magic_name__ : Optional[Any] =mlp_ratios
__magic_name__ : str =num_attention_heads
__magic_name__ : int =hidden_act
__magic_name__ : List[Any] =hidden_dropout_prob
__magic_name__ : Optional[Any] =attention_probs_dropout_prob
__magic_name__ : Optional[Any] =classifier_dropout_prob
__magic_name__ : List[str] =initializer_range
__magic_name__ : List[str] =drop_path_rate
__magic_name__ : List[Any] =layer_norm_eps
__magic_name__ : List[str] =decoder_hidden_size
__magic_name__ : Union[str, Any] =kwargs.get("""reshape_last_stage""" , __snake_case )
__magic_name__ : Dict =semantic_loss_ignore_index
class __A ( UpperCamelCase__ ):
UpperCamelCase = version.parse("""1.11""" )
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self :Any ):
'''simple docstring'''
return 1E-4
@property
def A__ ( self :int ):
'''simple docstring'''
return 12
| 21 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__A = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "ernie_m"
lowercase_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__(self : Any , UpperCAmelCase_ : int = 250_002 , UpperCAmelCase_ : int = 768 , UpperCAmelCase_ : int = 12 , UpperCAmelCase_ : int = 12 , UpperCAmelCase_ : int = 3_072 , UpperCAmelCase_ : str = "gelu" , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : int = 514 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : float = 1E-0_5 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[Any]=0.0 , **UpperCAmelCase_ : str , ) ->List[str]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: List[Any] =vocab_size
lowerCamelCase__: Tuple =hidden_size
lowerCamelCase__: Optional[Any] =num_hidden_layers
lowerCamelCase__: Optional[Any] =num_attention_heads
lowerCamelCase__: List[str] =intermediate_size
lowerCamelCase__: Optional[int] =hidden_act
lowerCamelCase__: Optional[Any] =hidden_dropout_prob
lowerCamelCase__: Union[str, Any] =attention_probs_dropout_prob
lowerCamelCase__: Optional[Any] =max_position_embeddings
lowerCamelCase__: Union[str, Any] =initializer_range
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: List[str] =classifier_dropout
lowerCamelCase__: Union[str, Any] =is_decoder
lowerCamelCase__: str =act_dropout
| 59 |
import heapq
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : list[list] =[]
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase , [-1 * len(lowerCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
__magic_name__ : Tuple =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__magic_name__ : Tuple =heapq.heappop(lowerCamelCase )[1][0]
chosen_vertices.add(lowerCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
__magic_name__ : Tuple =elem[1][1].index(lowerCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 21 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowerCAmelCase_ = TypeVar('''T''')
class __lowerCAmelCase ( Generic[T] ):
def __init__(self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
snake_case_ : Any | T = None
snake_case_ : int = len(__magic_name__ )
snake_case_ : list[T] = [any_type for _ in range(self.N )] + arr
snake_case_ : Optional[int] = fnc
self.build()
def lowerCamelCase (self ) -> None:
'''simple docstring'''
for p in range(self.N - 1 , 0 , -1 ):
snake_case_ : Optional[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
p += self.N
snake_case_ : Dict = v
while p > 1:
snake_case_ : List[str] = p // 2
snake_case_ : int = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> T | None: # noqa: E741
'''simple docstring'''
snake_case_ , snake_case_ : int = l + self.N, r + self.N
snake_case_ : T | None = None
while l <= r:
if l % 2 == 1:
snake_case_ : Optional[Any] = self.st[l] if res is None else self.fn(__magic_name__ , self.st[l] )
if r % 2 == 0:
snake_case_ : Optional[int] = self.st[r] if res is None else self.fn(__magic_name__ , self.st[r] )
snake_case_ , snake_case_ : Dict = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowerCAmelCase_ = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
lowerCAmelCase_ = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
lowerCAmelCase_ = SegmentTree(test_array, min)
lowerCAmelCase_ = SegmentTree(test_array, max)
lowerCAmelCase_ = SegmentTree(test_array, lambda a, b: a + b)
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
for i in range(len(_UpperCamelCase ) ):
for j in range(_UpperCamelCase , len(_UpperCamelCase ) ):
snake_case_ : Dict = reduce(_UpperCamelCase , test_array[i : j + 1] )
snake_case_ : Tuple = reduce(_UpperCamelCase , test_array[i : j + 1] )
snake_case_ : Tuple = reduce(lambda _UpperCamelCase , _UpperCamelCase : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(_UpperCamelCase , _UpperCamelCase )
assert max_range == max_segment_tree.query(_UpperCamelCase , _UpperCamelCase )
assert sum_range == sum_segment_tree.query(_UpperCamelCase , _UpperCamelCase )
test_all_segments()
for index, value in test_updates.items():
lowerCAmelCase_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 60 |
UpperCAmelCase_ : int = range(2, 20 + 1)
UpperCAmelCase_ : Tuple = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
__magic_name__ : Any =sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
__magic_name__ , __magic_name__ : Tuple =0, 0
__magic_name__ : Optional[Any] =n - i
__magic_name__ : Union[str, Any] =memo.get(lowerCamelCase )
if sub_memo is not None:
__magic_name__ : int =sub_memo.get(lowerCamelCase )
if jumps is not None and len(lowerCamelCase ) > 0:
# find and make the largest jump without going over
__magic_name__ : Dict =-1
for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__magic_name__ : Optional[Any] =_k
break
if max_jump >= 0:
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =jumps[max_jump]
# since the difference between jumps is cached, add c
__magic_name__ : Tuple =diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
__magic_name__ , __magic_name__ : Tuple =divmod(lowerCamelCase , 10 )
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__magic_name__ : str =[]
else:
__magic_name__ : List[str] ={c: []}
__magic_name__ : List[str] =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__magic_name__ , __magic_name__ : Union[str, Any] =next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__magic_name__ , __magic_name__ : Optional[int] =compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
__magic_name__ : Tuple =sub_memo[c]
# keep jumps sorted by # of terms skipped
__magic_name__ : List[Any] =0
while j < len(lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if i >= n:
return 0, i
if k > len(lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__magic_name__ : Tuple =i
__magic_name__ , __magic_name__ , __magic_name__ : Tuple =0, 0, 0
for j in range(len(lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__magic_name__ : Optional[Any] =ds_c + ds_b
diff += addend
__magic_name__ : str =0
for j in range(lowerCamelCase ):
__magic_name__ : int =a_i[j] + addend
__magic_name__ , __magic_name__ : Any =divmod(lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return diff, i - start_i
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for j in range(lowerCamelCase , len(lowerCamelCase ) ):
__magic_name__ : Tuple =digits[j] + addend
if s >= 10:
__magic_name__ , __magic_name__ : int =divmod(lowerCamelCase , 10 )
__magic_name__ : int =addend // 10 + quotient
else:
__magic_name__ : Dict =s
__magic_name__ : Any =addend // 10
if addend == 0:
break
while addend > 0:
__magic_name__ , __magic_name__ : Union[str, Any] =divmod(lowerCamelCase , 10 )
digits.append(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase = 10**15 ):
__magic_name__ : List[str] =[1]
__magic_name__ : str =1
__magic_name__ : str =0
while True:
__magic_name__ , __magic_name__ : List[str] =next_term(lowerCamelCase , 20 , i + dn , lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
__magic_name__ : int =0
for j in range(len(lowerCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = XLNetTokenizer
snake_case__ = XLNetTokenizerFast
snake_case__ = True
snake_case__ = True
def a ( self : str ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : List[str] ) -> List[Any]:
lowerCAmelCase__ = "<s>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> str:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<eod>" )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 1_006 )
def a ( self : int ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def a ( self : List[str] ) -> Any:
lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [285, 46, 10, 170, 382] )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def a ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
def a ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
def a ( self : Any ) -> Any:
lowerCAmelCase__ = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
lowerCAmelCase__ = tokenizer.encode("sequence builders" , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.encode("multi-sequence build" , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def a ( self : Union[str, Any] ) -> Any:
# fmt: off
lowerCAmelCase__ = {"input_ids": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 61 |
from typing import List
from .keymap import KEYMAP, get_character
def lowerCAmelCase_ ( lowerCamelCase ):
def decorator(lowerCamelCase ):
__magic_name__ : str =getattr(lowerCamelCase , """handle_key""" , [] )
handle += [key]
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
def lowerCAmelCase_ ( *lowerCamelCase ):
def decorator(lowerCamelCase ):
__magic_name__ : Dict =getattr(lowerCamelCase , """handle_key""" , [] )
handle += keys
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
class __A ( UpperCamelCase__ ):
def __new__( cls :Dict , __snake_case :Optional[Any] , __snake_case :Union[str, Any] , __snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : int =super().__new__(cls , __snake_case , __snake_case , __snake_case )
if not hasattr(__snake_case , """key_handler""" ):
setattr(__snake_case , """key_handler""" , {} )
setattr(__snake_case , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__magic_name__ : int =getattr(__snake_case , """handle_key""" , [] )
for key in handled_keys:
__magic_name__ : List[str] =value
return new_cls
@staticmethod
def A__ ( cls :Optional[int] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =get_character()
if char != KEYMAP["undefined"]:
__magic_name__ : Optional[int] =ord(__snake_case )
__magic_name__ : int =cls.key_handler.get(__snake_case )
if handler:
__magic_name__ : Dict =char
return handler(cls )
else:
return None
def lowerCAmelCase_ ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 21 | 0 |
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict ):
SCREAMING_SNAKE_CASE : Optional[int] = {}
def _A ( self : str ):
print(self.vertex )
for i in self.vertex:
print(UpperCAmelCase_ , " -> " , " -> ".join([str(UpperCAmelCase_ ) for j in self.vertex[i]] ) )
def _A ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCAmelCase_ )
else:
# else make a new vertex
SCREAMING_SNAKE_CASE : List[str] = [to_vertex]
def _A ( self : int ):
# visited array for storing already visited nodes
SCREAMING_SNAKE_CASE : Any = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : list ):
# mark start vertex as visited
SCREAMING_SNAKE_CASE : Any = True
print(UpperCAmelCase_ , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
snake_case = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 62 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCAmelCase_ : Dict = 2048
UpperCAmelCase_ : int = 4096
UpperCAmelCase_ : Any = 42
UpperCAmelCase_ : Optional[int] = os.environ.pop("PROCESS_TRAIN", "false")
UpperCAmelCase_ : str = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def lowerCAmelCase_ ( lowerCamelCase ):
def choose_first(lowerCamelCase , lowerCamelCase=False ):
assert isinstance(lowerCamelCase , lowerCamelCase )
if len(lowerCamelCase ) == 1:
__magic_name__ : List[str] =answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__magic_name__ : Tuple ={k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
__magic_name__ : str ={"""id""": example["""id"""]}
__magic_name__ : List[Any] =example["""annotations"""]
__magic_name__ : List[str] =annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
__magic_name__ : Optional[int] =["""yes"""] if 1 in yes_no_answer else ["""no"""]
__magic_name__ : List[str] =[]
__magic_name__ : Dict =[]
__magic_name__ : str =["""<cls>"""]
else:
__magic_name__ : Tuple =["""short"""]
__magic_name__ : Optional[int] =choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
__magic_name__ : Tuple =["""long"""]
__magic_name__ : Tuple =choose_first(annotation["""long_answer"""] , is_long_answer=lowerCamelCase )
__magic_name__ : List[Any] =[]
answer.update(lowerCamelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
__magic_name__ : Any =True
else:
__magic_name__ : List[str] =False
__magic_name__ : int =["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , lowerCamelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =_get_single_answer(lowerCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : Any =example["""document"""]["""tokens"""]
__magic_name__ : str =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__magic_name__ : Dict =["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__magic_name__ : Tuple =example["""document"""]["""tokens"""]
__magic_name__ : Optional[int] =answer["""start_token"""]
__magic_name__ : List[Any] =answer["""end_token"""]
__magic_name__ : Optional[Any] =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__magic_name__ : Optional[int] =""" """.join(context[start_token:end_token] )
# checking above code
if assertion:
__magic_name__ : List[str] =doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : str =doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : Dict =""" """.join([old[i] for i in range(len(lowerCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , lowerCamelCase , end="""\n""" )
print("""Old:""" , lowerCamelCase , end="""\n\n""" )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=True ):
# overlap will be of doc_stride - q_len
__magic_name__ : Any =get_context_and_ans(lowerCamelCase , assertion=lowerCamelCase )
__magic_name__ : Union[str, Any] =out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__magic_name__ : List[Any] =tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
__magic_name__ : Dict =input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : List[str] =[]
__magic_name__ : int =[]
__magic_name__ : List[str] =input_ids[:q_len]
__magic_name__ : Dict =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Tuple =input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowerCamelCase ),
"end_token": [-100] * len(lowerCamelCase ),
"category": category,
},
}
__magic_name__ : int =out["""context"""].split()
__magic_name__ : Any =splitted_context[answer["""end_token"""]]
__magic_name__ : str =len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=lowerCamelCase , ).input_ids )
__magic_name__ : Optional[int] =len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=lowerCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__magic_name__ : Union[str, Any] =len(tokenizer(lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__magic_name__ : str =input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
__magic_name__ : Dict =answer["""start_token"""]
__magic_name__ : int =answer["""end_token"""]
if assertion:
__magic_name__ : Any =tokenizer.decode(lowerCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , lowerCamelCase , end="""\n\n""" )
if len(lowerCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__magic_name__ : Any =input_ids[:q_len]
__magic_name__ : Union[str, Any] =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
__magic_name__ : Any =[]
__magic_name__ : List[str] =[]
__magic_name__ : List[str] =[]
__magic_name__ : str =[] # null, yes, no, long, short
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Dict =input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__magic_name__ : List[Any] =start_token - i + q_len
__magic_name__ : Optional[Any] =end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
__magic_name__ : Optional[Any] =-100
__magic_name__ : Optional[Any] =-100
answers_category.append("""null""" )
__magic_name__ : Optional[int] =inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase )
answers_end_token.append(lowerCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(lowerCamelCase ) )
print("""Old:""" , tokenizer.decode(lowerCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=False ):
__magic_name__ : List[Any] =get_strided_contexts_and_ans(
lowerCamelCase , lowerCamelCase , doc_stride=lowerCamelCase , max_length=lowerCamelCase , assertion=lowerCamelCase , )
return example
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
with jsonlines.open(lowerCamelCase , """a""" ) as writer:
for example in tqdm(lowerCamelCase , total=len(lowerCamelCase ) , desc="""Saving samples ... """ ):
__magic_name__ : int =example["""labels"""]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCAmelCase_ : Optional[int] = load_dataset("natural_questions")
UpperCAmelCase_ : Optional[int] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
UpperCAmelCase_ : str = data["train" if PROCESS_TRAIN == "true" else "validation"]
UpperCAmelCase_ : Optional[int] = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
UpperCAmelCase_ : int = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCAmelCase_ : Optional[Any] = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
UpperCAmelCase_ : int = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """xlm-roberta-xl"""
def __init__( self :Dict , __snake_case :Optional[Any]=25_08_80 , __snake_case :List[Any]=25_60 , __snake_case :Optional[Any]=36 , __snake_case :Any=32 , __snake_case :int=1_02_40 , __snake_case :List[Any]="gelu" , __snake_case :Union[str, Any]=0.1 , __snake_case :Optional[Any]=0.1 , __snake_case :str=5_14 , __snake_case :Union[str, Any]=1 , __snake_case :Optional[int]=0.02 , __snake_case :str=1E-05 , __snake_case :str=1 , __snake_case :int=0 , __snake_case :Tuple=2 , __snake_case :Optional[int]="absolute" , __snake_case :str=True , __snake_case :Any=None , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__magic_name__ : List[str] =vocab_size
__magic_name__ : List[str] =hidden_size
__magic_name__ : Union[str, Any] =num_hidden_layers
__magic_name__ : Any =num_attention_heads
__magic_name__ : Any =hidden_act
__magic_name__ : List[str] =intermediate_size
__magic_name__ : Any =hidden_dropout_prob
__magic_name__ : Union[str, Any] =attention_probs_dropout_prob
__magic_name__ : Any =max_position_embeddings
__magic_name__ : Any =type_vocab_size
__magic_name__ : List[str] =initializer_range
__magic_name__ : Optional[int] =layer_norm_eps
__magic_name__ : Dict =position_embedding_type
__magic_name__ : Any =use_cache
__magic_name__ : Dict =classifier_dropout
class __A ( UpperCamelCase__ ):
@property
def A__ ( self :Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
__magic_name__ : str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__ : Optional[Any] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 21 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( UpperCamelCase_ ):
__a = (DPMSolverSinglestepScheduler,)
__a = (("num_inference_steps", 25),)
def UpperCamelCase_ ( self , **lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: Any= {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**lowerCAmelCase )
return config
def UpperCamelCase_ ( self , lowerCAmelCase=0 , **lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__: Union[str, Any]= dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''num_inference_steps''' , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= self.dummy_sample
SCREAMING_SNAKE_CASE__: List[Any]= 0.1 * sample
SCREAMING_SNAKE_CASE__: Optional[Any]= [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__: str= self.get_scheduler_config(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__: Tuple= dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= scheduler_class.from_pretrained(lowerCAmelCase )
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__: int= dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= sample, sample
for t in range(lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE__: str= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: str= new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
def UpperCamelCase_ ( self , lowerCAmelCase=0 , **lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[int]= dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__: List[Any]= kwargs.pop('''num_inference_steps''' , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= self.dummy_sample
SCREAMING_SNAKE_CASE__: Dict= 0.1 * sample
SCREAMING_SNAKE_CASE__: Optional[Any]= [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_scheduler_config()
SCREAMING_SNAKE_CASE__: Tuple= scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE__: Tuple= dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= scheduler_class.from_pretrained(lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE__: Union[str, Any]= dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE__: Dict= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: str= new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self , lowerCAmelCase=None , **lowerCAmelCase ) -> List[Any]:
if scheduler is None:
SCREAMING_SNAKE_CASE__: Dict= self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__: Dict= self.get_scheduler_config(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= scheduler_class(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__: Tuple= self.get_scheduler_config(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= scheduler_class(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= 10
SCREAMING_SNAKE_CASE__: Optional[Any]= self.dummy_model()
SCREAMING_SNAKE_CASE__: List[Any]= self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__: str= model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
return sample
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE__: Tuple= 50
SCREAMING_SNAKE_CASE__: List[Any]= self.dummy_model()
SCREAMING_SNAKE_CASE__: List[str]= self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
SCREAMING_SNAKE_CASE__: Dict= model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1e-3
def UpperCamelCase_ ( self ) -> str:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> int:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
SCREAMING_SNAKE_CASE__: Optional[int]= DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE__: int= self.full_loop(scheduler=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
SCREAMING_SNAKE_CASE__: Tuple= DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__: str= DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__: Dict= UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__: Union[str, Any]= DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__: List[Any]= self.full_loop(scheduler=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def UpperCamelCase_ ( self ) -> int:
self.check_over_configs(thresholding=lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , algorithm_type='''dpmsolver++''' , solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , )
def UpperCamelCase_ ( self ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> int:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , prediction_type=lowerCAmelCase , algorithm_type=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: int= self.full_loop(
solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , prediction_type=lowerCAmelCase , algorithm_type=lowerCAmelCase , )
assert not torch.isnan(lowerCAmelCase ).any(), "Samples have nan numbers"
def UpperCamelCase_ ( self ) -> int:
self.check_over_configs(lower_order_final=lowerCAmelCase )
self.check_over_configs(lower_order_final=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Dict:
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
self.check_over_configs(variance_type=lowerCAmelCase )
self.check_over_configs(variance_type='''learned_range''' )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCAmelCase , time_step=0 )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: List[str]= self.full_loop()
SCREAMING_SNAKE_CASE__: List[Any]= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= self.full_loop(use_karras_sigmas=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1e-3
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Any= self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1e-3
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: str= self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1e-3
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: int= self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__: int= self.get_scheduler_config(thresholding=lowerCAmelCase , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE__: Optional[int]= scheduler_class(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= 10
SCREAMING_SNAKE_CASE__: Tuple= self.dummy_model()
SCREAMING_SNAKE_CASE__: Any= self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
| 64 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__magic_name__ : Dict =F"{src_lang}-{tgt_lang}"
print(F"Converting {dataset}-{pair}" )
__magic_name__ : Dict =datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
__magic_name__ : Optional[int] =F"{dataset}-{pair}"
__magic_name__ : int =Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(F"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
__magic_name__ : Dict ="""val""" if split == """validation""" else split
__magic_name__ : List[Any] =save_dir.joinpath(F"{fn}.source" )
__magic_name__ : Optional[int] =save_dir.joinpath(F"{fn}.target" )
__magic_name__ : Optional[Any] =src_path.open("""w+""" )
__magic_name__ : List[Any] =tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ : str =x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 21 | 0 |
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
try:
UpperCAmelCase__ : Union[str, Any] = float(__UpperCamelCase )
except ValueError:
raise ValueError("""Please enter a valid number""" )
UpperCAmelCase__ : List[str] = decimal - int(__UpperCamelCase )
if fractional_part == 0:
return int(__UpperCamelCase ), 1
else:
UpperCAmelCase__ : Optional[Any] = len(str(__UpperCamelCase ).split(""".""" )[1] )
UpperCAmelCase__ : List[Any] = int(decimal * (10**number_of_frac_digits) )
UpperCAmelCase__ : Any = 10**number_of_frac_digits
UpperCAmelCase__ , UpperCAmelCase__ : int = denominator, numerator
while True:
UpperCAmelCase__ : int = dividend % divisor
if remainder == 0:
break
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = divisor, remainder
UpperCAmelCase__ , UpperCAmelCase__ : int = numerator / divisor, denominator / divisor
return int(__UpperCamelCase ), int(__UpperCamelCase )
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(89.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }")
| 65 |
from __future__ import annotations
from fractions import Fraction
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =[]
__magic_name__ : List[Any] =11
__magic_name__ : Tuple =int("""1""" + """0""" * digit_len )
for num in range(lowerCamelCase , lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowerCamelCase , lowerCamelCase ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
__magic_name__ : List[str] =10
return solutions
def lowerCAmelCase_ ( lowerCamelCase = 2 ):
__magic_name__ : str =1.0
for fraction in fraction_list(lowerCamelCase ):
__magic_name__ : int =Fraction(lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 21 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
_lowercase : int = args.pruning_method
_lowercase : Optional[int] = args.threshold
_lowercase : str = args.model_name_or_path.rstrip('/' )
_lowercase : Optional[Any] = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
_lowercase : int = torch.load(os.path.join(SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) )
_lowercase : List[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Optional[Any] = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
_lowercase : Tuple = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
_lowercase : List[Any] = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
_lowercase : Optional[int] = MagnitudeBinarizer.apply(inputs=SCREAMING_SNAKE_CASE , threshold=SCREAMING_SNAKE_CASE )
_lowercase : Dict = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : List[Any] = name[:-6]
_lowercase : Dict = model[F"""{prefix_}mask_scores"""]
_lowercase : Any = TopKBinarizer.apply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : Dict = name[:-6]
_lowercase : Union[str, Any] = model[F"""{prefix_}mask_scores"""]
_lowercase : Any = ThresholdBinarizer.apply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : str = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Optional[int] = model[F"""{prefix_}mask_scores"""]
_lowercase , _lowercase : List[str] = -0.1, 1.1
_lowercase : int = torch.sigmoid(SCREAMING_SNAKE_CASE )
_lowercase : Tuple = s * (r - l) + l
_lowercase : Union[str, Any] = s_bar.clamp(min=0.0 , max=1.0 )
_lowercase : Optional[int] = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : Tuple = os.path.join(
os.path.dirname(SCREAMING_SNAKE_CASE ) , F"""bertarized_{os.path.basename(SCREAMING_SNAKE_CASE )}""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
shutil.copytree(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
UpperCamelCase = parser.parse_args()
main(args)
| 66 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
__magic_name__ : Dict =sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__magic_name__ : Union[str, Any] =mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
__magic_name__ : List[Any] =proportion * 4
print(F"The estimated value of pi is {pi_estimate}" )
print(F"The numpy value of pi is {pi}" )
print(F"The total error is {abs(pi - pi_estimate )}" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ):
def identity_function(lowerCamelCase ) -> float:
return x
__magic_name__ : Optional[int] =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__magic_name__ : str =(max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F"Estimating area under y=x where x varies from {min_value} to {max_value}" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {expected_value}" )
print(F"Total error is {abs(estimated_value - expected_value )}" )
print("""******************""" )
def lowerCAmelCase_ ( lowerCamelCase ):
def function_to_integrate(lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
__magic_name__ : Dict =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {pi}" )
print(F"Total error is {abs(estimated_value - pi )}" )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A_ ( nn.Module ):
"""simple docstring"""
def __init__( self : str ,__A : int = 16 ,__A : int = 88 ,__A : Optional[int] = None ,__A : int = 1 ,__A : float = 0.0 ,__A : int = 32 ,__A : Optional[int] = None ,__A : bool = False ,__A : Optional[int] = None ,__A : Optional[int] = None ,__A : str = "geglu" ,__A : Optional[int] = None ,) -> Optional[int]:
super().__init__()
_lowercase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__A ,attention_head_dim=__A ,in_channels=__A ,num_layers=__A ,dropout=__A ,norm_num_groups=__A ,cross_attention_dim=__A ,attention_bias=__A ,sample_size=__A ,num_vector_embeds=__A ,activation_fn=__A ,num_embeds_ada_norm=__A ,)
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_lowercase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_lowercase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_lowercase = [1, 0]
def __UpperCAmelCase ( self : Union[str, Any] ,__A : Union[str, Any] ,__A : Tuple ,__A : Any=None ,__A : Optional[int]=None ,__A : Tuple=None ,__A : bool = True ,) -> Optional[Any]:
_lowercase = hidden_states
_lowercase = []
_lowercase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_lowercase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_lowercase = self.transformer_index_for_condition[i]
_lowercase = self.transformers[transformer_index](
__A ,encoder_hidden_states=__A ,timestep=__A ,cross_attention_kwargs=__A ,return_dict=__A ,)[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_lowercase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_lowercase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__A ) | 67 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __A ( tf.keras.layers.Layer ):
def __init__( self :Optional[int] , __snake_case :Dict[str, int] , __snake_case :List[str] , __snake_case :int = None , __snake_case :int = None ):
'''simple docstring'''
super().__init__()
__magic_name__ : Optional[int] =pad_token_id
__magic_name__ : List[Any] =max_length
__magic_name__ : Dict =vocab
__magic_name__ : int =merges
__magic_name__ : Optional[int] =BytePairTokenizer(__snake_case , __snake_case , sequence_length=__snake_case )
@classmethod
def A__ ( cls :List[Any] , __snake_case :GPTaTokenizer , *__snake_case :int , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : List[Any] =[""" """.join(__snake_case ) for m in tokenizer.bpe_ranks.keys()]
__magic_name__ : str =tokenizer.get_vocab()
return cls(__snake_case , __snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Dict , __snake_case :Union[str, os.PathLike] , *__snake_case :Union[str, Any] , **__snake_case :int ):
'''simple docstring'''
__magic_name__ : Dict =GPTaTokenizer.from_pretrained(__snake_case , *__snake_case , **__snake_case )
return cls.from_tokenizer(__snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Optional[Any] , __snake_case :List[Any] ):
'''simple docstring'''
return cls(**__snake_case )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def A__ ( self :List[Any] , __snake_case :Dict , __snake_case :int = None ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.tf_tokenizer(__snake_case )
__magic_name__ : Tuple =tf.ones_like(__snake_case )
if self.pad_token_id is not None:
# pad the tokens up to max length
__magic_name__ : Tuple =max_length if max_length is not None else self.max_length
if max_length is not None:
__magic_name__ , __magic_name__ : Tuple =pad_model_inputs(
__snake_case , max_seq_length=__snake_case , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 21 | 0 |
import numpy as np
class _A :
"""simple docstring"""
def __init__( self : Any ) -> Tuple:
__UpperCAmelCase =(0, 0)
__UpperCAmelCase =None
__UpperCAmelCase =0
__UpperCAmelCase =0
__UpperCAmelCase =0
def __eq__( self : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> List[Any]:
return self.position == cell.position
def _a ( self : str ) -> Any:
print(self.position )
class _A :
"""simple docstring"""
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=(5, 5) ) -> int:
__UpperCAmelCase =np.zeros(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =world_size[0]
__UpperCAmelCase =world_size[1]
def _a ( self : Optional[Any] ) -> Tuple:
print(self.w )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Tuple ) -> int:
__UpperCAmelCase =[
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__UpperCAmelCase =cell.position[0]
__UpperCAmelCase =cell.position[1]
__UpperCAmelCase =[]
for n in neughbour_cord:
__UpperCAmelCase =current_x + n[0]
__UpperCAmelCase =current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__UpperCAmelCase =Cell()
__UpperCAmelCase =(x, y)
__UpperCAmelCase =cell
neighbours.append(__SCREAMING_SNAKE_CASE )
return neighbours
def lowercase__ ( A_: Dict , A_: Tuple , A_: Tuple ) -> str:
"""simple docstring"""
__UpperCAmelCase =[]
__UpperCAmelCase =[]
_open.append(A_ )
while _open:
__UpperCAmelCase =np.argmin([n.f for n in _open] )
__UpperCAmelCase =_open[min_f]
_closed.append(_open.pop(A_ ) )
if current == goal:
break
for n in world.get_neigbours(A_ ):
for c in _closed:
if c == n:
continue
__UpperCAmelCase =current.g + 1
__UpperCAmelCase , __UpperCAmelCase =n.position
__UpperCAmelCase , __UpperCAmelCase =goal.position
__UpperCAmelCase =(ya - ya) ** 2 + (xa - xa) ** 2
__UpperCAmelCase =n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(A_ )
__UpperCAmelCase =[]
while current.parent is not None:
path.append(current.position )
__UpperCAmelCase =current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
__A = Gridworld()
# Start position and goal
__A = Cell()
__A = (0, 0)
__A = Cell()
__A = (4, 4)
print(F"""path from {start.position} to {goal.position}""")
__A = astar(world, start, goal)
# Just for visual reasons.
for i in s:
__A = 1
print(world.w)
| 68 |
import math
import tensorflow as tf
from packaging import version
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : List[str] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Optional[Any] =tf.cast(math.pi , x.dtype )
__magic_name__ : int =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase , 3 )) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Any =tf.convert_to_tensor(lowerCamelCase )
return x * tf.tanh(tf.math.softplus(lowerCamelCase ) )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[Any] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Union[str, Any] =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Dict =tf.cast(1.7_0_2 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.clip_by_value(_gelu(lowerCamelCase ) , -10 , 10 )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=-1 ):
__magic_name__ , __magic_name__ : List[Any] =tf.split(lowerCamelCase , 2 , axis=lowerCamelCase )
return a * tf.math.sigmoid(lowerCamelCase )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.keras.activations.gelu(lowerCamelCase , approximate=lowerCamelCase )
UpperCAmelCase_ : List[str] = tf.keras.activations.gelu
UpperCAmelCase_ : Dict = approximate_gelu_wrap
else:
UpperCAmelCase_ : Dict = _gelu
UpperCAmelCase_ : str = _gelu_new
UpperCAmelCase_ : Any = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowerCAmelCase_ ( lowerCamelCase ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 21 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def A ( *a_ : Optional[Any] , **a_ : int ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@require_torch
def A ( self : int ):
"""simple docstring"""
__snake_case = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__snake_case = image_classifier(a_ , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(a_ ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
__snake_case = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) , [
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
] , )
@require_tf
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__snake_case = image_classifier(a_ , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(a_ ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
__snake_case = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) , [
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
] , )
@slow
@require_torch
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__snake_case = image_classifier(a_ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(a_ ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
__snake_case = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def A ( self : int ):
"""simple docstring"""
__snake_case = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__snake_case = image_classifier(a_ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(a_ ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
__snake_case = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(a_ ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 69 |
from collections.abc import Sequence
def lowerCAmelCase_ ( lowerCamelCase = None ):
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
__magic_name__ : str =nums[0]
for i in range(1 , len(lowerCamelCase ) ):
__magic_name__ : Any =nums[i]
__magic_name__ : Dict =max(lowerCamelCase , ans + num , lowerCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ : List[str] = int(input("Enter number of elements : ").strip())
UpperCAmelCase_ : Tuple = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 21 | 0 |
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : List[str] ):
'''simple docstring'''
lowerCamelCase_ = (boundary[1] - boundary[0]) / steps
lowerCamelCase_ = boundary[0]
lowerCamelCase_ = boundary[1]
lowerCamelCase_ = make_points(lowercase , lowercase , lowercase )
lowerCamelCase_ = 0.0
y += (h / 2.0) * f(lowercase )
for i in x_i:
# print(i)
y += h * f(lowercase )
y += (h / 2.0) * f(lowercase )
return y
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Union[str, Any] , lowercase : List[Any] ):
'''simple docstring'''
lowerCamelCase_ = a + h
while x < (b - h):
yield x
lowerCamelCase_ = x + h
def _SCREAMING_SNAKE_CASE ( lowercase : str ): # enter your function here
'''simple docstring'''
lowerCamelCase_ = (x - 0) * (x - 0)
return y
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = 0.0 # Lower bound of integration
lowerCamelCase_ = 1.0 # Upper bound of integration
lowerCamelCase_ = 10.0 # define number of steps or resolution
lowerCamelCase_ = [a, b] # define boundary of integration
lowerCamelCase_ = method_a(lowercase , lowercase )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
| 70 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__( self :Union[str, Any] ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __A :
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =sorted(set(self.languages ) ) if self.languages else None
__magic_name__ : Optional[int] =len(self.languages ) if self.languages else None
def __call__( self :List[str] ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =set(self.languages )
if self.languages and set(__snake_case ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__magic_name__ : Any =[]
for lang, text in translation_dict.items():
if isinstance(__snake_case , __snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__magic_name__ , __magic_name__ : List[str] =zip(*sorted(__snake_case ) )
return {"language": languages, "translation": translations}
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 21 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase_ : Dict = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase_ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase_ : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def A__ ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def A__ ( self :Tuple , __snake_case :str , __snake_case :Tuple , __snake_case :List[str]=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__snake_case , __snake_case , sample_weight=__snake_case ) ),
}
| 21 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=__SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = ['note_seq']
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''note_seq'''] )
@classmethod
def _A( cls , *snake_case_ , **snake_case_ ):
requires_backends(cls , ['''note_seq'''] )
@classmethod
def _A( cls , *snake_case_ , **snake_case_ ):
requires_backends(cls , ['''note_seq'''] )
| 72 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =OmegaConf.load(lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(lowerCamelCase ) ) )
return config
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
if conf_path is None:
__magic_name__ : List[str] ="""./model_checkpoints/vqgan_only.yaml"""
__magic_name__ : Dict =load_config(lowerCamelCase , display=lowerCamelCase )
__magic_name__ : Tuple =VQModel(**config.model.params )
if ckpt_path is None:
__magic_name__ : Optional[Any] ="""./model_checkpoints/vqgan_only.pt"""
__magic_name__ : Tuple =torch.load(lowerCamelCase , map_location=lowerCamelCase )
if ".ckpt" in ckpt_path:
__magic_name__ : Any =sd["""state_dict"""]
model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
model.to(lowerCamelCase )
del sd
return model
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] =model.encode(lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
__magic_name__ : List[Any] =model.decode(lowerCamelCase )
return xrec
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ , __magic_name__ : Optional[int] =string.rsplit(""".""" , 1 )
if reload:
__magic_name__ : Optional[int] =importlib.import_module(lowerCamelCase )
importlib.reload(lowerCamelCase )
return getattr(importlib.import_module(lowerCamelCase , package=lowerCamelCase ) , cls )
def lowerCAmelCase_ ( lowerCamelCase ):
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=True ):
__magic_name__ : str =instantiate_from_config(lowerCamelCase )
if sd is not None:
model.load_state_dict(lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# load the specified checkpoint
if ckpt:
__magic_name__ : str =torch.load(lowerCamelCase , map_location="""cpu""" )
__magic_name__ : Any =pl_sd["""global_step"""]
print(F"loaded model from global step {global_step}." )
else:
__magic_name__ : List[Any] ={"""state_dict""": None}
__magic_name__ : Optional[Any] =None
__magic_name__ : Tuple =load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=lowerCamelCase , eval_mode=lowerCamelCase )["""model"""]
return model, global_step
| 21 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : List[Any] = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class _snake_case ( A__ ):
_lowercase : List[str] = '''blip_text_model'''
def __init__( self , a=3_0524 , a=768 , a=768 , a=3072 , a=768 , a=12 , a=8 , a=512 , a="gelu" , a=1E-12 , a=0.0 , a=0.0 , a=0.02 , a=3_0522 , a=2 , a=0 , a=102 , a=True , a=True , **a , ) -> Any:
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , sep_token_id=a , **a , )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = encoder_hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = projection_dim
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = is_decoder
SCREAMING_SNAKE_CASE = use_cache
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , a , **a) -> "PretrainedConfig":
cls._set_token_in_kwargs(a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(a , **a)
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
SCREAMING_SNAKE_CASE = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(a , **a)
class _snake_case ( A__ ):
_lowercase : Any = '''blip_vision_model'''
def __init__( self , a=768 , a=3072 , a=512 , a=12 , a=12 , a=384 , a=16 , a="gelu" , a=1E-5 , a=0.0 , a=1E-10 , **a , ) -> Optional[int]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = projection_dim
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , a , **a) -> "PretrainedConfig":
cls._set_token_in_kwargs(a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(a , **a)
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
SCREAMING_SNAKE_CASE = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(a , **a)
class _snake_case ( A__ ):
_lowercase : Optional[int] = '''blip'''
_lowercase : List[Any] = True
def __init__( self , a=None , a=None , a=512 , a=2.65_92 , a=256 , **a , ) -> Any:
super().__init__(**a)
if text_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')
SCREAMING_SNAKE_CASE = BlipTextConfig(**a)
SCREAMING_SNAKE_CASE = BlipVisionConfig(**a)
SCREAMING_SNAKE_CASE = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE = projection_dim
SCREAMING_SNAKE_CASE = logit_scale_init_value
SCREAMING_SNAKE_CASE = 1.0
SCREAMING_SNAKE_CASE = 0.02
SCREAMING_SNAKE_CASE = image_text_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , a , a , **a) -> Optional[int]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a)
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__)
SCREAMING_SNAKE_CASE = self.text_config.to_dict()
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 73 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
def A__ ( self :Tuple ):
'''simple docstring'''
debug_launcher(test_script.main )
def A__ ( self :Dict ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 21 | 0 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowercase_ = datasets.utils.logging.get_logger(__name__)
class __UpperCamelCase ( folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class __UpperCamelCase ( folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
lowerCAmelCase_ = datasets.Audio()
lowerCAmelCase_ = '''audio'''
lowerCAmelCase_ = AudioFolderConfig
lowerCAmelCase_ = 42 # definition at the bottom of the script
lowerCAmelCase_ = AudioClassification(audio_column='''audio''' , label_column='''label''' )
lowercase_ = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
lowercase_ = AUDIO_EXTENSIONS
| 74 |
UpperCAmelCase_ : Tuple = 0 # The first color of the flag.
UpperCAmelCase_ : Any = 1 # The second color of the flag.
UpperCAmelCase_ : str = 2 # The third color of the flag.
UpperCAmelCase_ : Tuple = (red, white, blue)
def lowerCAmelCase_ ( lowerCamelCase ):
if not sequence:
return []
if len(lowerCamelCase ) == 1:
return list(lowerCamelCase )
__magic_name__ : int =0
__magic_name__ : str =len(lowerCamelCase ) - 1
__magic_name__ : Optional[Any] =0
while mid <= high:
if sequence[mid] == colors[0]:
__magic_name__ , __magic_name__ : Tuple =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
__magic_name__ , __magic_name__ : Optional[Any] =sequence[high], sequence[mid]
high -= 1
else:
__magic_name__ : Optional[int] =F"The elements inside the sequence must contains only {colors} values"
raise ValueError(lowerCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[Any] = input("Enter numbers separated by commas:\n").strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 21 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowerCamelCase_ ( __a ):
def __init__( self : Union[str, Any] , _A : int , _A : List[str] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = dataset
UpperCAmelCase__ : List[Any] = process
UpperCAmelCase__ : List[Any] = params
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Tuple , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.dataset[i]
UpperCAmelCase__ : str = self.process(_A , **self.params )
return processed
class lowerCamelCase_ ( __a ):
def __init__( self : Tuple , _A : int , _A : Tuple , _A : Optional[int] , _A : Union[str, Any]=None ):
'''simple docstring'''
UpperCAmelCase__ : Dict = loader
UpperCAmelCase__ : Tuple = infer
UpperCAmelCase__ : Tuple = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : List[Any] = loader_batch_size
# Internal bookkeeping
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : str = None
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = iter(self.loader )
return self
def lowercase_ ( self : Tuple ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
UpperCAmelCase__ : Dict = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
UpperCAmelCase__ : Optional[int] = {}
for k, element in self._loader_batch_data.items():
if isinstance(_A , _A ):
# Convert ModelOutput to tuple first
UpperCAmelCase__ : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
UpperCAmelCase__ : Optional[int] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
UpperCAmelCase__ : Dict = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_A , _A ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
UpperCAmelCase__ : Any = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
UpperCAmelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
UpperCAmelCase__ : Any = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
UpperCAmelCase__ : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
UpperCAmelCase__ : Any = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
UpperCAmelCase__ : str = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
UpperCAmelCase__ : Optional[int] = self._loader_batch_data.__class__(_A )
self._loader_batch_index += 1
return result
def lowercase_ ( self : str ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
UpperCAmelCase__ : Dict = next(self.iterator )
UpperCAmelCase__ : Tuple = self.infer(_A , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_A , torch.Tensor ):
UpperCAmelCase__ : str = processed
else:
UpperCAmelCase__ : Dict = list(processed.keys() )[0]
UpperCAmelCase__ : List[Any] = processed[key]
if isinstance(_A , _A ):
UpperCAmelCase__ : Optional[Any] = len(_A )
else:
UpperCAmelCase__ : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCAmelCase__ : Dict = observed_batch_size
# Setting internal index to unwrap the batch
UpperCAmelCase__ : List[str] = processed
UpperCAmelCase__ : List[str] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowerCamelCase_ ( __a ):
def __init__( self : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Optional[Any] , _A : str=None ):
'''simple docstring'''
super().__init__(_A , _A , _A )
def __iter__( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = iter(self.loader )
UpperCAmelCase__ : Union[str, Any] = None
return self
def lowercase_ ( self : str ):
'''simple docstring'''
if self.subiterator is None:
UpperCAmelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
UpperCAmelCase__ : Any = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
UpperCAmelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
UpperCAmelCase__ : List[Any] = next(self.subiterator )
return processed
class lowerCamelCase_ ( __a ):
def __iter__( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = iter(self.loader )
return self
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : str = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
UpperCAmelCase__ : Optional[Any] = self.loader_batch_item()
UpperCAmelCase__ : Dict = item.pop('''is_last''' )
accumulator.append(_A )
if is_last:
return accumulator
while not is_last:
UpperCAmelCase__ : List[str] = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_A , torch.Tensor ):
UpperCAmelCase__ : Union[str, Any] = processed
else:
UpperCAmelCase__ : List[str] = list(processed.keys() )[0]
UpperCAmelCase__ : List[Any] = processed[key]
if isinstance(_A , _A ):
UpperCAmelCase__ : Tuple = len(_A )
else:
UpperCAmelCase__ : List[Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCAmelCase__ : int = observed_batch_size
UpperCAmelCase__ : str = processed
UpperCAmelCase__ : List[str] = 0
while self._loader_batch_index < self.loader_batch_size:
UpperCAmelCase__ : Tuple = self.loader_batch_item()
UpperCAmelCase__ : List[Any] = item.pop('''is_last''' )
accumulator.append(_A )
if is_last:
return accumulator
else:
UpperCAmelCase__ : int = processed
UpperCAmelCase__ : List[str] = item.pop('''is_last''' )
accumulator.append(_A )
return accumulator
class lowerCamelCase_ ( __a ):
def __init__( self : List[Any] , _A : Dataset , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = dataset
UpperCAmelCase__ : str = key
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , _A : Union[str, Any] ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowerCamelCase_ ( __a ):
def __init__( self : Optional[int] , _A : Dataset , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = dataset
UpperCAmelCase__ : List[str] = keya
UpperCAmelCase__ : int = keya
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Optional[int] , _A : Optional[int] ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 75 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = 1
@register_to_config
def __init__( self :Any , __snake_case :Tuple=20_00 , __snake_case :Optional[Any]=0.1 , __snake_case :Any=20 , __snake_case :Optional[int]=1E-3 ):
'''simple docstring'''
__magic_name__ : Dict =None
__magic_name__ : List[str] =None
__magic_name__ : str =None
def A__ ( self :Dict , __snake_case :Optional[int] , __snake_case :Union[str, torch.device] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =torch.linspace(1 , self.config.sampling_eps , __snake_case , device=__snake_case )
def A__ ( self :List[str] , __snake_case :List[str] , __snake_case :int , __snake_case :int , __snake_case :List[str]=None ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__magic_name__ : int =(
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__magic_name__ : Optional[int] =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__magic_name__ : str =std.flatten()
while len(std.shape ) < len(score.shape ):
__magic_name__ : List[str] =std.unsqueeze(-1 )
__magic_name__ : Union[str, Any] =-score / std
# compute
__magic_name__ : Tuple =-1.0 / len(self.timesteps )
__magic_name__ : int =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__magic_name__ : Dict =beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__magic_name__ : Any =beta_t.unsqueeze(-1 )
__magic_name__ : Dict =-0.5 * beta_t * x
__magic_name__ : Optional[int] =torch.sqrt(__snake_case )
__magic_name__ : int =drift - diffusion**2 * score
__magic_name__ : List[str] =x + drift * dt
# add noise
__magic_name__ : Optional[int] =randn_tensor(x.shape , layout=x.layout , generator=__snake_case , device=x.device , dtype=x.dtype )
__magic_name__ : Optional[Any] =x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 21 | 0 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
a_ = 'src/transformers'
# Matches is_xxx_available()
a_ = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
a_ = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a_ = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
a_ = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
a_ = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a_ = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
a_ = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
a_ = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
a_ = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
a_ = re.compile(r'^\s*try:')
# Catches a line with else:
a_ = re.compile(r'^\s*else:')
def __UpperCAmelCase ( __UpperCamelCase ):
if _re_test_backend.search(__UpperCamelCase ) is None:
return None
__lowercase : List[str] = [b[0] for b in _re_backend.findall(__UpperCamelCase )]
backends.sort()
return "_and_".join(__UpperCamelCase )
def __UpperCAmelCase ( __UpperCamelCase ):
with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowercase : Tuple = f.readlines()
__lowercase : str = 0
while line_index < len(__UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase : List[str] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
__lowercase : List[Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__UpperCamelCase ):
__lowercase : int = _re_one_line_import_struct.search(__UpperCamelCase ).groups()[0]
__lowercase : Union[str, Any] = re.findall('''\[([^\]]+)\]''' , __UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
__lowercase : Tuple = _re_import_struct_key_value.search(__UpperCamelCase )
if single_line_import_search is not None:
__lowercase : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase : Optional[Any] = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase : Optional[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
__lowercase : Union[str, Any] = lines[line_index]
if _re_import_struct_add_one.search(__UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__UpperCamelCase ) is not None:
__lowercase : Optional[Any] = _re_import_struct_add_many.search(__UpperCamelCase ).groups()[0].split(''', ''' )
__lowercase : Tuple = [obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif _re_between_brackets.search(__UpperCamelCase ) is not None:
__lowercase : int = _re_between_brackets.search(__UpperCamelCase ).groups()[0].split(''', ''' )
__lowercase : int = [obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif _re_quote_object.search(__UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(__UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase : str = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase : Union[str, Any] = []
while (
line_index < len(__UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
__lowercase : List[str] = lines[line_index]
__lowercase : Optional[Any] = _re_import.search(__UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase : Tuple = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(__UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase : List[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
__lowercase : Optional[Any] = lines[line_index]
__lowercase : Optional[int] = _re_import.search(__UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase : List[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
def find_duplicates(__UpperCamelCase ):
return [k for k, v in collections.Counter(__UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase : List[str] = []
for key in import_dict_objects.keys():
__lowercase : Optional[int] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__lowercase : List[Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase : List[Any] = '''base imports''' if key == '''none''' else f"""{key} backend"""
errors.append(f"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __UpperCAmelCase ( ):
__lowercase : Tuple = []
for root, _, files in os.walk(__UpperCamelCase ):
if "__init__.py" in files:
__lowercase : Optional[int] = os.path.join(__UpperCamelCase , '''__init__.py''' )
__lowercase : Dict = parse_init(__UpperCamelCase )
if objects is not None:
__lowercase : Dict = analyze_results(*__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
__lowercase : str = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(__UpperCamelCase ) )
if len(__UpperCamelCase ) > 0:
raise ValueError('''\n\n'''.join(__UpperCamelCase ) )
def __UpperCAmelCase ( ):
__lowercase : int = []
for path, directories, files in os.walk(__UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(__UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
__lowercase : Tuple = str((Path(__UpperCamelCase ) / folder).relative_to(__UpperCamelCase ) )
__lowercase : List[Any] = short_path.replace(os.path.sep , '''.''' )
submodules.append(__UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
__lowercase : Optional[Any] = str((Path(__UpperCamelCase ) / fname).relative_to(__UpperCamelCase ) )
__lowercase : List[str] = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(__UpperCamelCase )
return submodules
a_ = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def __UpperCAmelCase ( ):
# This is to make sure the transformers module imported is the one in the repo.
__lowercase : str = importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(__UpperCamelCase , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__lowercase : List[Any] = spec.loader.load_module()
__lowercase : int = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__UpperCamelCase ) > 0:
__lowercase : Optional[Any] = '''\n'''.join(f"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
f"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 76 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__( self :List[str] , __snake_case :int , __snake_case :int , __snake_case :float , **__snake_case :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =feature_size
__magic_name__ : Union[str, Any] =sampling_rate
__magic_name__ : List[Any] =padding_value
__magic_name__ : List[str] =kwargs.pop("""padding_side""" , """right""" )
__magic_name__ : Tuple =kwargs.pop("""return_attention_mask""" , __snake_case )
super().__init__(**__snake_case )
def A__ ( self :Any , __snake_case :Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case :Union[bool, str, PaddingStrategy] = True , __snake_case :Optional[int] = None , __snake_case :bool = False , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__magic_name__ : Union[str, Any] ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
__magic_name__ : int =processed_features[self.model_input_names[0]]
__magic_name__ : Union[str, Any] =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
__magic_name__ : List[str] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__magic_name__ : Optional[int] =required_input[0]
if isinstance(__snake_case , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__magic_name__ : Optional[Any] =0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
__magic_name__ : List[str] =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
__magic_name__ : int ="""tf"""
elif is_torch_tensor(__snake_case ):
__magic_name__ : str ="""pt"""
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
__magic_name__ : List[Any] ="""np"""
else:
raise ValueError(
f"type of {first_element} unknown: {type(__snake_case )}. "
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__magic_name__ : List[str] =to_numpy(__snake_case )
else:
__magic_name__ : str =[to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
__magic_name__ : Dict =self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
__magic_name__ : Optional[Any] =processed_features[self.model_input_names[0]]
__magic_name__ : Dict =len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__magic_name__ : Optional[int] =[]
for i in range(__snake_case ):
__magic_name__ : Any ={k: v[i] for k, v in processed_features.items()}
# truncation
__magic_name__ : List[str] =self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__magic_name__ : Optional[int] =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__magic_name__ : Tuple =PaddingStrategy.MAX_LENGTH
__magic_name__ : str ={}
for i in range(__snake_case ):
# padding
__magic_name__ : List[str] =self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
__magic_name__ : Dict =[]
if value.dtype is np.dtype(np.floataa ):
__magic_name__ : Optional[int] =value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def A__ ( self :Any , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
__magic_name__ : Dict =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__magic_name__ : Any =len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : Dict =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : List[Any] =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__magic_name__ : int =np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
__magic_name__ : List[Any] =max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__magic_name__ : Tuple =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__magic_name__ : str =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__magic_name__ : Optional[int] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__magic_name__ : List[Any] =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def A__ ( self :Optional[Any] , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__magic_name__ : Union[str, Any] =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : List[str] =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : Any =len(__snake_case ) > max_length
if needs_to_be_truncated:
__magic_name__ : List[Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__magic_name__ : List[str] =processed_features["""attention_mask"""][:max_length]
return processed_features
def A__ ( self :List[Any] , __snake_case :str=False , __snake_case :Optional[int]=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
__magic_name__ : Union[str, Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
__magic_name__ : Optional[int] =PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
__magic_name__ : Any =padding
else:
__magic_name__ : Any =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 21 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
"""simple docstring"""
# Construct model
if openai_config_file == "":
__UpperCAmelCase : List[str] = OpenAIGPTConfig()
else:
__UpperCAmelCase : Dict = OpenAIGPTConfig.from_json_file(UpperCamelCase )
__UpperCAmelCase : Tuple = OpenAIGPTModel(UpperCamelCase )
# Load weights from numpy
load_tf_weights_in_openai_gpt(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Save pytorch-model
__UpperCAmelCase : Union[str, Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
__UpperCAmelCase : Dict = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , UpperCamelCase )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 77 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __A ( nn.Module ):
def __init__( self :List[Any] ):
'''simple docstring'''
super().__init__()
__magic_name__ : Tuple =nn.Linear(3 , 4 )
__magic_name__ : Union[str, Any] =nn.BatchNormad(4 )
__magic_name__ : List[str] =nn.Linear(4 , 5 )
def A__ ( self :Dict , __snake_case :Tuple ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__snake_case ) ) )
class __A ( UpperCamelCase__ ):
def A__ ( self :Any , __snake_case :Optional[Any] , *__snake_case :List[Any] , **__snake_case :Any ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class __A ( UpperCamelCase__ ):
def A__ ( self :List[str] , __snake_case :Tuple , __snake_case :Union[str, Any] ):
'''simple docstring'''
return output + 1
class __A ( unittest.TestCase ):
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : Tuple =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
self.assertEqual(test_model._hf_hook , __snake_case )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : List[str] =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
add_hook_to_module(__snake_case , __snake_case , append=__snake_case )
self.assertEqual(isinstance(test_model._hf_hook , __snake_case ) , __snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
__magic_name__ : Any =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(x + 1 )
__magic_name__ : Optional[Any] =test_model(x + 2 )
__magic_name__ : int =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : int =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : str =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : List[str] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Optional[Any] =SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
assert torch.allclose(__snake_case , __snake_case , atol=1E-5 )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
__magic_name__ : Dict =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(__snake_case )
__magic_name__ : Dict =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Optional[int] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Union[str, Any] =SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
assert torch.allclose(__snake_case , output + 2 , atol=1E-5 )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Tuple =ModelForTest()
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Dict =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__magic_name__ : Any =True
__magic_name__ : Any =test_model(__snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[Any] =model(__snake_case )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__snake_case , AlignDevicesHook(io_same_device=__snake_case ) )
__magic_name__ : int =torch.randn(2 , 3 ).to(0 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , torch.device(0 ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : int ={"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[int] =torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : Union[str, Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__magic_name__ : Tuple ={
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Tuple =torch.randn(2 , 3 )
__magic_name__ : int =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : str =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case , offload_buffers=__snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Optional[int] =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Dict =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : List[str] =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Any =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() , offload_buffers=__snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 21 | 0 |
'''simple docstring'''
from collections import namedtuple
SCREAMING_SNAKE_CASE_: Optional[int] =namedtuple('from_to', 'from_ to')
SCREAMING_SNAKE_CASE_: str ={
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 10_00),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00454, 264.172),
'cubicyard': from_to(0.76455, 1.30795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.000236588, 4226.75),
}
def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : str , snake_case_ : str ) -> float:
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ", ".join(snake_case_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ", ".join(snake_case_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = KandinskyInpaintPipeline
UpperCamelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCamelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return 1_00
@property
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : str =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__magic_name__ : Tuple =MultilingualCLIP(__snake_case )
__magic_name__ : Optional[int] =text_encoder.eval()
return text_encoder
@property
def A__ ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Optional[Any] ={
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ : Union[str, Any] =UNetaDConditionModel(**__snake_case )
return model
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.dummy_text_encoder
__magic_name__ : Optional[Any] =self.dummy_tokenizer
__magic_name__ : Optional[Any] =self.dummy_unet
__magic_name__ : Tuple =self.dummy_movq
__magic_name__ : List[str] =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__snake_case , )
__magic_name__ : str ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self :str , __snake_case :Optional[Any] , __snake_case :int=0 ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : Dict =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
__magic_name__ : str =floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ : str =Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
__magic_name__ : Dict =np.ones((64, 64) , dtype=np.floataa )
__magic_name__ : Any =0
if str(__snake_case ).startswith("""mps""" ):
__magic_name__ : Dict =torch.manual_seed(__snake_case )
else:
__magic_name__ : Tuple =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__ : List[Any] ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple ="""cpu"""
__magic_name__ : List[Any] =self.get_dummy_components()
__magic_name__ : Union[str, Any] =self.pipeline_class(**__snake_case )
__magic_name__ : Tuple =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Tuple =pipe(**self.get_dummy_inputs(__snake_case ) )
__magic_name__ : List[Any] =output.images
__magic_name__ : Any =pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__magic_name__ : int =image[0, -3:, -3:, -1]
__magic_name__ : str =image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Optional[Any] =np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def A__ ( self :Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def A__ ( self :List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
__magic_name__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__magic_name__ : List[Any] =np.ones((7_68, 7_68) , dtype=np.floataa )
__magic_name__ : Any =0
__magic_name__ : int ="""a hat"""
__magic_name__ : int =KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__magic_name__ : Dict =KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
__magic_name__ : int =pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Union[str, Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ , __magic_name__ : Dict =pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__magic_name__ : Optional[Any] =pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
__magic_name__ : Optional[int] =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 21 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _lowerCamelCase ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__lowerCamelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__lowerCamelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__lowerCamelCase )
return parser.parse_args()
def _lowerCamelCase ( ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = parse_args()
# Import training_script as a module.
UpperCAmelCase__ : Optional[int] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCAmelCase__ : List[str] = script_fpath.stem
UpperCAmelCase__ : Optional[Any] = importlib.import_module(__lowerCamelCase )
# Patch sys.argv
UpperCAmelCase__ : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 79 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A :
def __init__( self :int , __snake_case :List[Any] , __snake_case :List[Any]=2 , __snake_case :Dict=True , __snake_case :Tuple=False , __snake_case :List[str]=10 , __snake_case :List[str]=3 , __snake_case :Union[str, Any]=32 * 8 , __snake_case :Optional[int]=32 * 8 , __snake_case :Any=4 , __snake_case :Union[str, Any]=64 , ):
'''simple docstring'''
__magic_name__ : Optional[int] =parent
__magic_name__ : List[Any] =batch_size
__magic_name__ : List[str] =is_training
__magic_name__ : List[str] =use_auxiliary_loss
__magic_name__ : Union[str, Any] =num_queries
__magic_name__ : str =num_channels
__magic_name__ : Union[str, Any] =min_size
__magic_name__ : Union[str, Any] =max_size
__magic_name__ : Optional[int] =num_labels
__magic_name__ : Tuple =hidden_dim
__magic_name__ : Any =hidden_dim
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
__magic_name__ : List[Any] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
__magic_name__ : List[str] =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
__magic_name__ : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
__magic_name__ : str =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Dict =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__magic_name__ : str =self.num_queries
__magic_name__ : Dict =self.num_labels
__magic_name__ : int =[1, 1, 1, 1]
__magic_name__ : List[str] =self.num_channels
__magic_name__ : str =64
__magic_name__ : List[str] =1_28
__magic_name__ : Optional[Any] =self.hidden_dim
__magic_name__ : Tuple =self.hidden_dim
__magic_name__ : Optional[int] =self.hidden_dim
return config
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =self.prepare_config_and_inputs()
__magic_name__ : Optional[Any] ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self :Union[str, Any] , __snake_case :Tuple , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : int =output.encoder_hidden_states
__magic_name__ : List[str] =output.pixel_decoder_hidden_states
__magic_name__ : int =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , config.decoder_layers )
def A__ ( self :List[Any] , __snake_case :Optional[Any] , __snake_case :int , __snake_case :str , __snake_case :str=False ):
'''simple docstring'''
with torch.no_grad():
__magic_name__ : List[str] =MaskaFormerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Union[str, Any] =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : int =model(__snake_case , output_hidden_states=__snake_case )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__snake_case , __snake_case )
def A__ ( self :Optional[Any] , __snake_case :List[str] , __snake_case :List[Any] , __snake_case :int , __snake_case :Any , __snake_case :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : str =MaskaFormerForUniversalSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
def comm_check_on_output(__snake_case :List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__magic_name__ : int =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : List[str] =model(__snake_case )
comm_check_on_output(__snake_case )
__magic_name__ : Any =model(
pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
comm_check_on_output(__snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Any =MaskaFormerModelTester(self )
__magic_name__ : Union[str, Any] =ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def A__ ( self :Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__snake_case )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple =model_class(__snake_case )
__magic_name__ : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple =[*signature.parameters.keys()]
__magic_name__ : Optional[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__magic_name__ : int =MaskaFormerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =(self.model_tester.min_size,) * 2
__magic_name__ : Union[str, Any] ={
"""pixel_values""": torch.randn((2, 3, *size) , device=__snake_case ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__snake_case ),
"""class_labels""": torch.zeros(2 , 10 , device=__snake_case ).long(),
}
__magic_name__ : Optional[Any] =self.model_tester.get_config()
__magic_name__ : Dict =MaskaFormerForUniversalSegmentation(__snake_case ).to(__snake_case )
__magic_name__ : Any =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] =model_class(__snake_case ).to(__snake_case )
__magic_name__ : Optional[int] =model(**__snake_case , output_attentions=__snake_case )
self.assertTrue(outputs.attentions is not None )
def A__ ( self :int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__magic_name__ : List[Any] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Dict =model_class(__snake_case )
model.to(__snake_case )
model.train()
__magic_name__ : Optional[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss
loss.backward()
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Tuple =True
__magic_name__ : Optional[int] =True
__magic_name__ : int =model_class(__snake_case ).to(__snake_case )
model.train()
__magic_name__ : List[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
__magic_name__ : Optional[int] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__magic_name__ : Optional[int] =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ : Dict = 1e-4
def lowerCAmelCase_ ( ):
__magic_name__ : Dict =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __A ( unittest.TestCase ):
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__snake_case )
__magic_name__ : int =self.default_image_processor
__magic_name__ : List[Any] =prepare_img()
__magic_name__ : Any =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Dict =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : List[str] =model(**__snake_case )
__magic_name__ : Any =torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Dict =torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Any =torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Optional[int] =self.default_image_processor
__magic_name__ : Tuple =prepare_img()
__magic_name__ : List[Any] =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Union[str, Any] =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : str =model(**__snake_case )
# masks_queries_logits
__magic_name__ : List[Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__magic_name__ : List[Any] =[
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__magic_name__ : Dict =torch.tensor(__snake_case ).to(__snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
# class_queries_logits
__magic_name__ : Any =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__magic_name__ : int =torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Any =self.default_image_processor
__magic_name__ : Union[str, Any] =image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , )
__magic_name__ : str =inputs["""pixel_values"""].to(__snake_case )
__magic_name__ : Tuple =[el.to(__snake_case ) for el in inputs["""mask_labels"""]]
__magic_name__ : Union[str, Any] =[el.to(__snake_case ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__magic_name__ : Dict =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
| 21 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class __UpperCamelCase ( _lowerCAmelCase ):
def __init__( self : int , _lowerCAmelCase : str , _lowerCAmelCase : List[str]=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : int=99 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : Optional[int]=5 , _lowerCAmelCase : Any=4 , _lowerCAmelCase : Tuple=37 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Union[str, Any]=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : int=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _a ( self : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Dict:
"""simple docstring"""
__lowercase = DistilBertModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = DistilBertForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = DistilBertForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = DistilBertForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = DistilBertForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] ) -> str:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = DistilBertForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
((__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase)) = config_and_inputs
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Optional[Any] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__snake_case :Dict = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case :Tuple = True
__snake_case :Tuple = True
__snake_case :List[str] = True
__snake_case :Optional[int] = True
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = DistilBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , dim=37 )
def _a ( self : Dict ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_lowerCAmelCase )
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowerCAmelCase )
def _a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_lowerCAmelCase )
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowerCAmelCase )
def _a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_lowerCAmelCase )
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowerCAmelCase )
@slow
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = DistilBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@slow
@require_torch_gpu
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__lowercase = True
__lowercase = model_class(config=_lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = torch.jit.trace(
_lowerCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , """traced_model.pt""" ) )
__lowercase = torch.jit.load(os.path.join(_lowerCAmelCase , """traced_model.pt""" ) , map_location=_lowerCAmelCase )
loaded(inputs_dict["""input_ids"""].to(_lowerCAmelCase ) , inputs_dict["""attention_mask"""].to(_lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__lowercase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
__lowercase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
__lowercase = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1e-4 ) )
| 80 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """segformer"""
def __init__( self :List[str] , __snake_case :str=3 , __snake_case :Optional[Any]=4 , __snake_case :List[Any]=[2, 2, 2, 2] , __snake_case :Dict=[8, 4, 2, 1] , __snake_case :Optional[int]=[32, 64, 1_60, 2_56] , __snake_case :Union[str, Any]=[7, 3, 3, 3] , __snake_case :Optional[Any]=[4, 2, 2, 2] , __snake_case :Tuple=[1, 2, 5, 8] , __snake_case :List[Any]=[4, 4, 4, 4] , __snake_case :Optional[Any]="gelu" , __snake_case :Tuple=0.0 , __snake_case :Dict=0.0 , __snake_case :Optional[int]=0.1 , __snake_case :Optional[int]=0.02 , __snake_case :Tuple=0.1 , __snake_case :Union[str, Any]=1E-6 , __snake_case :int=2_56 , __snake_case :Optional[int]=2_55 , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __snake_case , )
__magic_name__ : Dict =num_channels
__magic_name__ : str =num_encoder_blocks
__magic_name__ : List[Any] =depths
__magic_name__ : Optional[Any] =sr_ratios
__magic_name__ : List[str] =hidden_sizes
__magic_name__ : List[str] =patch_sizes
__magic_name__ : Any =strides
__magic_name__ : Optional[Any] =mlp_ratios
__magic_name__ : str =num_attention_heads
__magic_name__ : int =hidden_act
__magic_name__ : List[Any] =hidden_dropout_prob
__magic_name__ : Optional[Any] =attention_probs_dropout_prob
__magic_name__ : Optional[Any] =classifier_dropout_prob
__magic_name__ : List[str] =initializer_range
__magic_name__ : List[str] =drop_path_rate
__magic_name__ : List[Any] =layer_norm_eps
__magic_name__ : List[str] =decoder_hidden_size
__magic_name__ : Union[str, Any] =kwargs.get("""reshape_last_stage""" , __snake_case )
__magic_name__ : Dict =semantic_loss_ignore_index
class __A ( UpperCamelCase__ ):
UpperCamelCase = version.parse("""1.11""" )
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self :Any ):
'''simple docstring'''
return 1E-4
@property
def A__ ( self :int ):
'''simple docstring'''
return 12
| 21 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_snake_case : str = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = "AutoTokenizer"
__UpperCAmelCase : List[Any] = ["tokenizer"]
__UpperCAmelCase : Tuple = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self : str , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any]=None ) -> Optional[Any]:
super().__init__(lowerCamelCase )
__snake_case : Tuple = speaker_embeddings
@classmethod
def __snake_case ( cls : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Optional[int]="speaker_embeddings_path.json" , **lowerCamelCase : List[Any] ) -> List[str]:
if speaker_embeddings_dict_path is not None:
__snake_case : Any = get_file_from_repo(
lowerCamelCase , lowerCamelCase , subfolder=kwargs.pop("subfolder" , lowerCamelCase ) , cache_dir=kwargs.pop("cache_dir" , lowerCamelCase ) , force_download=kwargs.pop("force_download" , lowerCamelCase ) , proxies=kwargs.pop("proxies" , lowerCamelCase ) , resume_download=kwargs.pop("resume_download" , lowerCamelCase ) , local_files_only=kwargs.pop("local_files_only" , lowerCamelCase ) , use_auth_token=kwargs.pop("use_auth_token" , lowerCamelCase ) , revision=kwargs.pop("revision" , lowerCamelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowerCamelCase , lowerCamelCase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
__snake_case : List[str] = None
else:
with open(lowerCamelCase ) as speaker_embeddings_json:
__snake_case : List[Any] = json.load(lowerCamelCase )
else:
__snake_case : Dict = None
__snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase , **lowerCamelCase )
return cls(tokenizer=lowerCamelCase , speaker_embeddings=lowerCamelCase )
def __snake_case ( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : List[str]="speaker_embeddings_path.json" , lowerCamelCase : List[Any]="speaker_embeddings" , lowerCamelCase : bool = False , **lowerCamelCase : List[Any] , ) -> str:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCamelCase , lowerCamelCase , "v2" ) , exist_ok=lowerCamelCase )
__snake_case : List[Any] = {}
__snake_case : Optional[int] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__snake_case : List[str] = self._load_voice_preset(lowerCamelCase )
__snake_case : int = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , lowerCamelCase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowerCamelCase , )
__snake_case : Optional[Any] = os.path.join(lowerCamelCase , F'{prompt_key}_{key}.npy' )
__snake_case : List[str] = tmp_dict
with open(os.path.join(lowerCamelCase , lowerCamelCase ) , "w" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
super().save_pretrained(lowerCamelCase , lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : str = None , **lowerCamelCase : int ) -> Union[str, Any]:
__snake_case : Optional[int] = self.speaker_embeddings[voice_preset]
__snake_case : List[str] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
__snake_case : int = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , lowerCamelCase ) , cache_dir=kwargs.pop("cache_dir" , lowerCamelCase ) , force_download=kwargs.pop("force_download" , lowerCamelCase ) , proxies=kwargs.pop("proxies" , lowerCamelCase ) , resume_download=kwargs.pop("resume_download" , lowerCamelCase ) , local_files_only=kwargs.pop("local_files_only" , lowerCamelCase ) , use_auth_token=kwargs.pop("use_auth_token" , lowerCamelCase ) , revision=kwargs.pop("revision" , lowerCamelCase ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
__snake_case : Tuple = np.load(lowerCamelCase )
return voice_preset_dict
def __snake_case ( self : Optional[int] , lowerCamelCase : Optional[dict] = None ) -> Tuple:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self : Optional[Any] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[Any]=None , lowerCamelCase : Dict="pt" , lowerCamelCase : List[Any]=256 , lowerCamelCase : str=False , lowerCamelCase : Optional[int]=True , lowerCamelCase : str=False , **lowerCamelCase : Any , ) -> List[Any]:
if voice_preset is not None and not isinstance(lowerCamelCase , lowerCamelCase ):
if (
isinstance(lowerCamelCase , lowerCamelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__snake_case : int = self._load_voice_preset(lowerCamelCase )
else:
if isinstance(lowerCamelCase , lowerCamelCase ) and not voice_preset.endswith(".npz" ):
__snake_case : List[str] = voice_preset + ".npz"
__snake_case : Union[str, Any] = np.load(lowerCamelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowerCamelCase , **lowerCamelCase )
__snake_case : str = BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
__snake_case : Dict = self.tokenizer(
lowerCamelCase , return_tensors=lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , return_attention_mask=lowerCamelCase , return_token_type_ids=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
if voice_preset is not None:
__snake_case : Tuple = voice_preset
return encoded_text
| 81 |
import heapq
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : list[list] =[]
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase , [-1 * len(lowerCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
__magic_name__ : Tuple =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__magic_name__ : Tuple =heapq.heappop(lowerCamelCase )[1][0]
chosen_vertices.add(lowerCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
__magic_name__ : Tuple =elem[1][1].index(lowerCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 21 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = ""
for word_or_phrase in separated:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise Exception("join() accepts only strings to be joined" )
joined += word_or_phrase + separator
return joined.strip(lowerCAmelCase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 82 |
UpperCAmelCase_ : int = range(2, 20 + 1)
UpperCAmelCase_ : Tuple = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
__magic_name__ : Any =sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
__magic_name__ , __magic_name__ : Tuple =0, 0
__magic_name__ : Optional[Any] =n - i
__magic_name__ : Union[str, Any] =memo.get(lowerCamelCase )
if sub_memo is not None:
__magic_name__ : int =sub_memo.get(lowerCamelCase )
if jumps is not None and len(lowerCamelCase ) > 0:
# find and make the largest jump without going over
__magic_name__ : Dict =-1
for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__magic_name__ : Optional[Any] =_k
break
if max_jump >= 0:
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =jumps[max_jump]
# since the difference between jumps is cached, add c
__magic_name__ : Tuple =diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
__magic_name__ , __magic_name__ : Tuple =divmod(lowerCamelCase , 10 )
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__magic_name__ : str =[]
else:
__magic_name__ : List[str] ={c: []}
__magic_name__ : List[str] =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__magic_name__ , __magic_name__ : Union[str, Any] =next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__magic_name__ , __magic_name__ : Optional[int] =compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
__magic_name__ : Tuple =sub_memo[c]
# keep jumps sorted by # of terms skipped
__magic_name__ : List[Any] =0
while j < len(lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if i >= n:
return 0, i
if k > len(lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__magic_name__ : Tuple =i
__magic_name__ , __magic_name__ , __magic_name__ : Tuple =0, 0, 0
for j in range(len(lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__magic_name__ : Optional[Any] =ds_c + ds_b
diff += addend
__magic_name__ : str =0
for j in range(lowerCamelCase ):
__magic_name__ : int =a_i[j] + addend
__magic_name__ , __magic_name__ : Any =divmod(lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return diff, i - start_i
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for j in range(lowerCamelCase , len(lowerCamelCase ) ):
__magic_name__ : Tuple =digits[j] + addend
if s >= 10:
__magic_name__ , __magic_name__ : int =divmod(lowerCamelCase , 10 )
__magic_name__ : int =addend // 10 + quotient
else:
__magic_name__ : Dict =s
__magic_name__ : Any =addend // 10
if addend == 0:
break
while addend > 0:
__magic_name__ , __magic_name__ : Union[str, Any] =divmod(lowerCamelCase , 10 )
digits.append(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase = 10**15 ):
__magic_name__ : List[str] =[1]
__magic_name__ : str =1
__magic_name__ : str =0
while True:
__magic_name__ , __magic_name__ : List[str] =next_term(lowerCamelCase , 20 , i + dn , lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
__magic_name__ : int =0
for j in range(len(lowerCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 | 0 |
"""simple docstring"""
lowerCAmelCase__ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowerCAmelCase__ = {value: key for key, value in MORSE_CODE_DICT.items()}
def snake_case_ ( A_ : str ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def snake_case_ ( A_ : str ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : List[Any] = '''Morse code here!'''
print(A_ )
_lowerCamelCase : Optional[Any] = encrypt(A_ )
print(A_ )
_lowerCamelCase : Optional[Any] = decrypt(A_ )
print(A_ )
if __name__ == "__main__":
main()
| 83 |
from typing import List
from .keymap import KEYMAP, get_character
def lowerCAmelCase_ ( lowerCamelCase ):
def decorator(lowerCamelCase ):
__magic_name__ : str =getattr(lowerCamelCase , """handle_key""" , [] )
handle += [key]
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
def lowerCAmelCase_ ( *lowerCamelCase ):
def decorator(lowerCamelCase ):
__magic_name__ : Dict =getattr(lowerCamelCase , """handle_key""" , [] )
handle += keys
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
class __A ( UpperCamelCase__ ):
def __new__( cls :Dict , __snake_case :Optional[Any] , __snake_case :Union[str, Any] , __snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : int =super().__new__(cls , __snake_case , __snake_case , __snake_case )
if not hasattr(__snake_case , """key_handler""" ):
setattr(__snake_case , """key_handler""" , {} )
setattr(__snake_case , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__magic_name__ : int =getattr(__snake_case , """handle_key""" , [] )
for key in handled_keys:
__magic_name__ : List[str] =value
return new_cls
@staticmethod
def A__ ( cls :Optional[int] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =get_character()
if char != KEYMAP["undefined"]:
__magic_name__ : Optional[int] =ord(__snake_case )
__magic_name__ : int =cls.key_handler.get(__snake_case )
if handler:
__magic_name__ : Dict =char
return handler(cls )
else:
return None
def lowerCAmelCase_ ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 21 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
lowercase = DatasetInfosDict.from_directory(__SCREAMING_SNAKE_CASE )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = str(__SCREAMING_SNAKE_CASE )
dataset_info.write_to_directory(__SCREAMING_SNAKE_CASE )
lowercase = DatasetInfo.from_directory(__SCREAMING_SNAKE_CASE )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , 'dataset_info.json' ) )
def UpperCAmelCase_ ( ):
lowercase = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
lowercase = dataset_info._to_yaml_dict()
assert sorted(__SCREAMING_SNAKE_CASE ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowercase = yaml.safe_dump(__SCREAMING_SNAKE_CASE )
lowercase = yaml.safe_load(__SCREAMING_SNAKE_CASE )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase_ ( ):
lowercase = DatasetInfo()
lowercase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = str(__SCREAMING_SNAKE_CASE )
dataset_infos_dict.write_to_directory(__SCREAMING_SNAKE_CASE )
lowercase = DatasetInfosDict.from_directory(__SCREAMING_SNAKE_CASE )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowercase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowercase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , 'README.md' ) )
| 84 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCAmelCase_ : Dict = 2048
UpperCAmelCase_ : int = 4096
UpperCAmelCase_ : Any = 42
UpperCAmelCase_ : Optional[int] = os.environ.pop("PROCESS_TRAIN", "false")
UpperCAmelCase_ : str = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def lowerCAmelCase_ ( lowerCamelCase ):
def choose_first(lowerCamelCase , lowerCamelCase=False ):
assert isinstance(lowerCamelCase , lowerCamelCase )
if len(lowerCamelCase ) == 1:
__magic_name__ : List[str] =answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__magic_name__ : Tuple ={k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
__magic_name__ : str ={"""id""": example["""id"""]}
__magic_name__ : List[Any] =example["""annotations"""]
__magic_name__ : List[str] =annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
__magic_name__ : Optional[int] =["""yes"""] if 1 in yes_no_answer else ["""no"""]
__magic_name__ : List[str] =[]
__magic_name__ : Dict =[]
__magic_name__ : str =["""<cls>"""]
else:
__magic_name__ : Tuple =["""short"""]
__magic_name__ : Optional[int] =choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
__magic_name__ : Tuple =["""long"""]
__magic_name__ : Tuple =choose_first(annotation["""long_answer"""] , is_long_answer=lowerCamelCase )
__magic_name__ : List[Any] =[]
answer.update(lowerCamelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
__magic_name__ : Any =True
else:
__magic_name__ : List[str] =False
__magic_name__ : int =["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , lowerCamelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =_get_single_answer(lowerCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : Any =example["""document"""]["""tokens"""]
__magic_name__ : str =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__magic_name__ : Dict =["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__magic_name__ : Tuple =example["""document"""]["""tokens"""]
__magic_name__ : Optional[int] =answer["""start_token"""]
__magic_name__ : List[Any] =answer["""end_token"""]
__magic_name__ : Optional[Any] =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__magic_name__ : Optional[int] =""" """.join(context[start_token:end_token] )
# checking above code
if assertion:
__magic_name__ : List[str] =doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : str =doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : Dict =""" """.join([old[i] for i in range(len(lowerCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , lowerCamelCase , end="""\n""" )
print("""Old:""" , lowerCamelCase , end="""\n\n""" )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=True ):
# overlap will be of doc_stride - q_len
__magic_name__ : Any =get_context_and_ans(lowerCamelCase , assertion=lowerCamelCase )
__magic_name__ : Union[str, Any] =out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__magic_name__ : List[Any] =tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
__magic_name__ : Dict =input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : List[str] =[]
__magic_name__ : int =[]
__magic_name__ : List[str] =input_ids[:q_len]
__magic_name__ : Dict =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Tuple =input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowerCamelCase ),
"end_token": [-100] * len(lowerCamelCase ),
"category": category,
},
}
__magic_name__ : int =out["""context"""].split()
__magic_name__ : Any =splitted_context[answer["""end_token"""]]
__magic_name__ : str =len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=lowerCamelCase , ).input_ids )
__magic_name__ : Optional[int] =len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=lowerCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__magic_name__ : Union[str, Any] =len(tokenizer(lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__magic_name__ : str =input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
__magic_name__ : Dict =answer["""start_token"""]
__magic_name__ : int =answer["""end_token"""]
if assertion:
__magic_name__ : Any =tokenizer.decode(lowerCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , lowerCamelCase , end="""\n\n""" )
if len(lowerCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__magic_name__ : Any =input_ids[:q_len]
__magic_name__ : Union[str, Any] =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
__magic_name__ : Any =[]
__magic_name__ : List[str] =[]
__magic_name__ : List[str] =[]
__magic_name__ : str =[] # null, yes, no, long, short
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Dict =input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__magic_name__ : List[Any] =start_token - i + q_len
__magic_name__ : Optional[Any] =end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
__magic_name__ : Optional[Any] =-100
__magic_name__ : Optional[Any] =-100
answers_category.append("""null""" )
__magic_name__ : Optional[int] =inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase )
answers_end_token.append(lowerCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(lowerCamelCase ) )
print("""Old:""" , tokenizer.decode(lowerCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=False ):
__magic_name__ : List[Any] =get_strided_contexts_and_ans(
lowerCamelCase , lowerCamelCase , doc_stride=lowerCamelCase , max_length=lowerCamelCase , assertion=lowerCamelCase , )
return example
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
with jsonlines.open(lowerCamelCase , """a""" ) as writer:
for example in tqdm(lowerCamelCase , total=len(lowerCamelCase ) , desc="""Saving samples ... """ ):
__magic_name__ : int =example["""labels"""]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCAmelCase_ : Optional[int] = load_dataset("natural_questions")
UpperCAmelCase_ : Optional[int] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
UpperCAmelCase_ : str = data["train" if PROCESS_TRAIN == "true" else "validation"]
UpperCAmelCase_ : Optional[int] = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
UpperCAmelCase_ : int = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCAmelCase_ : Optional[Any] = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
UpperCAmelCase_ : int = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 21 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = StableDiffusionInstructPixaPixPipeline
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase( self : str )-> int:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=a_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(a_ )
SCREAMING_SNAKE_CASE__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE__ : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase( self : List[Any] , a_ : Tuple , a_ : Optional[Any]=0 )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = Image.fromarray(np.uinta(a_ ) ).convert('RGB' )
if str(a_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(a_ )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
SCREAMING_SNAKE_CASE__ : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : int = sd_pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Dict = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : Optional[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'french fries'
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(**a_ , negative_prompt=a_ )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [inputs['prompt']] * 2
SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(a_ ).unsqueeze(0 ).to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = image / 2 + 0.5
SCREAMING_SNAKE_CASE__ : Tuple = image.permute(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__ : int = image.repeat(2 , 1 , 1 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Any = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' )
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Any = [round(a_ , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(a_ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : int = VaeImageProcessor(do_resize=a_ , do_normalize=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = components['vae']
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs_by_type(a_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(a_ , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase( self : List[Any] , a_ : Dict=0 )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
SCREAMING_SNAKE_CASE__ : Tuple = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __lowercase( self : int )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowercase( self : Dict )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ )
SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowercase( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ )
SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Tuple = pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __lowercase( self : int )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = 0
def callback_fn(a_ : int , a_ : int , a_ : torch.FloatTensor ) -> None:
SCREAMING_SNAKE_CASE__ : Tuple = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
SCREAMING_SNAKE_CASE__ : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE__ : Tuple = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Dict = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs()
pipe(**a_ , callback=a_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowercase( self : int )-> Any:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a_ )
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE__ : Dict = inputs['image'].resize((504, 504) )
SCREAMING_SNAKE_CASE__ : List[Any] = 'timbrooks/instruct-pix2pix'
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
a_ , safety_checker=a_ , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Any = pipe(**a_ )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
SCREAMING_SNAKE_CASE__ : Any = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
SCREAMING_SNAKE_CASE__ : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 85 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """xlm-roberta-xl"""
def __init__( self :Dict , __snake_case :Optional[Any]=25_08_80 , __snake_case :List[Any]=25_60 , __snake_case :Optional[Any]=36 , __snake_case :Any=32 , __snake_case :int=1_02_40 , __snake_case :List[Any]="gelu" , __snake_case :Union[str, Any]=0.1 , __snake_case :Optional[Any]=0.1 , __snake_case :str=5_14 , __snake_case :Union[str, Any]=1 , __snake_case :Optional[int]=0.02 , __snake_case :str=1E-05 , __snake_case :str=1 , __snake_case :int=0 , __snake_case :Tuple=2 , __snake_case :Optional[int]="absolute" , __snake_case :str=True , __snake_case :Any=None , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__magic_name__ : List[str] =vocab_size
__magic_name__ : List[str] =hidden_size
__magic_name__ : Union[str, Any] =num_hidden_layers
__magic_name__ : Any =num_attention_heads
__magic_name__ : Any =hidden_act
__magic_name__ : List[str] =intermediate_size
__magic_name__ : Any =hidden_dropout_prob
__magic_name__ : Union[str, Any] =attention_probs_dropout_prob
__magic_name__ : Any =max_position_embeddings
__magic_name__ : Any =type_vocab_size
__magic_name__ : List[str] =initializer_range
__magic_name__ : Optional[int] =layer_norm_eps
__magic_name__ : Dict =position_embedding_type
__magic_name__ : Any =use_cache
__magic_name__ : Dict =classifier_dropout
class __A ( UpperCamelCase__ ):
@property
def A__ ( self :Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
__magic_name__ : str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__ : Optional[Any] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 21 | 0 |
__a :Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
__a :list[bool | None] = [None] * 1000_0000
__a :Optional[Any] = True
__a :List[Any] = False
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
A_ = chain(next_number(__UpperCamelCase ) )
A_ = number_chain
while number < 1000_0000:
A_ = number_chain
number *= 10
return number_chain
def __snake_case ( __UpperCamelCase : int = 1000_0000 ):
"""simple docstring"""
for i in range(1 ,__UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 86 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__magic_name__ : Dict =F"{src_lang}-{tgt_lang}"
print(F"Converting {dataset}-{pair}" )
__magic_name__ : Dict =datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
__magic_name__ : Optional[int] =F"{dataset}-{pair}"
__magic_name__ : int =Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(F"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
__magic_name__ : Dict ="""val""" if split == """validation""" else split
__magic_name__ : List[Any] =save_dir.joinpath(F"{fn}.source" )
__magic_name__ : Optional[int] =save_dir.joinpath(F"{fn}.target" )
__magic_name__ : Optional[Any] =src_path.open("""w+""" )
__magic_name__ : List[Any] =tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ : str =x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 21 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : str=30 , UpperCAmelCase__ : Tuple=400 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Tuple=[0.5, 0.5, 0.5] , ) ->str:
'''simple docstring'''
A__ = size if size is not None else {'''height''': 18, '''width''': 18}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = image_size
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = ViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : List[str]) ->str:
'''simple docstring'''
A__ = EfficientFormerImageProcessorTester(self)
@property
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''size'''))
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image)
# Test not batched input
A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray)
# Test not batched input
A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor)
# Test not batched input
A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 87 |
from __future__ import annotations
from fractions import Fraction
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =[]
__magic_name__ : List[Any] =11
__magic_name__ : Tuple =int("""1""" + """0""" * digit_len )
for num in range(lowerCamelCase , lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowerCamelCase , lowerCamelCase ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
__magic_name__ : List[str] =10
return solutions
def lowerCAmelCase_ ( lowerCamelCase = 2 ):
__magic_name__ : str =1.0
for fraction in fraction_list(lowerCamelCase ):
__magic_name__ : int =Fraction(lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 21 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _snake_case ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
_lowerCamelCase : Dict = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(__snake_case )
# Let's go
_lowerCamelCase : Any = parser.parse_args()
if not hasattr(__snake_case , """func""" ):
parser.print_help()
exit(1 )
# Run
_lowerCamelCase : List[str] = args.func(__snake_case )
service.run()
if __name__ == "__main__":
main()
| 88 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
__magic_name__ : Dict =sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__magic_name__ : Union[str, Any] =mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
__magic_name__ : List[Any] =proportion * 4
print(F"The estimated value of pi is {pi_estimate}" )
print(F"The numpy value of pi is {pi}" )
print(F"The total error is {abs(pi - pi_estimate )}" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ):
def identity_function(lowerCamelCase ) -> float:
return x
__magic_name__ : Optional[int] =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__magic_name__ : str =(max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F"Estimating area under y=x where x varies from {min_value} to {max_value}" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {expected_value}" )
print(F"Total error is {abs(estimated_value - expected_value )}" )
print("""******************""" )
def lowerCAmelCase_ ( lowerCamelCase ):
def function_to_integrate(lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
__magic_name__ : Dict =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {pi}" )
print(F"Total error is {abs(estimated_value - pi )}" )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _lowerCamelCase:
lowercase_ : float
lowercase_ : TreeNode | None = None
lowercase_ : TreeNode | None = None
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
# Validation
def is_valid_tree(lowerCamelCase_ ) -> bool:
if node is None:
return True
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(lowerCamelCase_ ):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
def is_binary_search_tree_recursive_check(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , lowerCamelCase_ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , lowerCamelCase_ )
)
return is_binary_search_tree_recursive_check(lowerCamelCase_ , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __A ( tf.keras.layers.Layer ):
def __init__( self :Optional[int] , __snake_case :Dict[str, int] , __snake_case :List[str] , __snake_case :int = None , __snake_case :int = None ):
'''simple docstring'''
super().__init__()
__magic_name__ : Optional[int] =pad_token_id
__magic_name__ : List[Any] =max_length
__magic_name__ : Dict =vocab
__magic_name__ : int =merges
__magic_name__ : Optional[int] =BytePairTokenizer(__snake_case , __snake_case , sequence_length=__snake_case )
@classmethod
def A__ ( cls :List[Any] , __snake_case :GPTaTokenizer , *__snake_case :int , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : List[Any] =[""" """.join(__snake_case ) for m in tokenizer.bpe_ranks.keys()]
__magic_name__ : str =tokenizer.get_vocab()
return cls(__snake_case , __snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Dict , __snake_case :Union[str, os.PathLike] , *__snake_case :Union[str, Any] , **__snake_case :int ):
'''simple docstring'''
__magic_name__ : Dict =GPTaTokenizer.from_pretrained(__snake_case , *__snake_case , **__snake_case )
return cls.from_tokenizer(__snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Optional[Any] , __snake_case :List[Any] ):
'''simple docstring'''
return cls(**__snake_case )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def A__ ( self :List[Any] , __snake_case :Dict , __snake_case :int = None ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.tf_tokenizer(__snake_case )
__magic_name__ : Tuple =tf.ones_like(__snake_case )
if self.pad_token_id is not None:
# pad the tokens up to max length
__magic_name__ : Tuple =max_length if max_length is not None else self.max_length
if max_length is not None:
__magic_name__ , __magic_name__ : Tuple =pad_model_inputs(
__snake_case , max_seq_length=__snake_case , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 21 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _snake_case ( ) -> Tuple:
lowerCAmelCase__ = HfArgumentParser(A )
lowerCAmelCase__ = parser.parse_args_into_dataclasses()[0]
lowerCAmelCase__ = TensorFlowBenchmark(args=A )
try:
lowerCAmelCase__ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowerCAmelCase__ = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
lowerCAmelCase__ = ''' '''.join(str(A ).split(''' ''' )[:-1] )
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = eval(str(A ).split(''' ''' )[-1] )
lowerCAmelCase__ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(A )
if len(A ) > 0:
lowerCAmelCase__ = full_error_msg + begin_error_msg + str(A )
raise ValueError(A )
benchmark.run()
if __name__ == "__main__":
main() | 90 |
import math
import tensorflow as tf
from packaging import version
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : List[str] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Optional[Any] =tf.cast(math.pi , x.dtype )
__magic_name__ : int =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase , 3 )) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Any =tf.convert_to_tensor(lowerCamelCase )
return x * tf.tanh(tf.math.softplus(lowerCamelCase ) )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[Any] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Union[str, Any] =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Dict =tf.cast(1.7_0_2 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.clip_by_value(_gelu(lowerCamelCase ) , -10 , 10 )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=-1 ):
__magic_name__ , __magic_name__ : List[Any] =tf.split(lowerCamelCase , 2 , axis=lowerCamelCase )
return a * tf.math.sigmoid(lowerCamelCase )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.keras.activations.gelu(lowerCamelCase , approximate=lowerCamelCase )
UpperCAmelCase_ : List[str] = tf.keras.activations.gelu
UpperCAmelCase_ : Dict = approximate_gelu_wrap
else:
UpperCAmelCase_ : Dict = _gelu
UpperCAmelCase_ : str = _gelu_new
UpperCAmelCase_ : Any = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowerCAmelCase_ ( lowerCamelCase ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 91 |
from collections.abc import Sequence
def lowerCAmelCase_ ( lowerCamelCase = None ):
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
__magic_name__ : str =nums[0]
for i in range(1 , len(lowerCamelCase ) ):
__magic_name__ : Any =nums[i]
__magic_name__ : Dict =max(lowerCamelCase , ans + num , lowerCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ : List[str] = int(input("Enter number of elements : ").strip())
UpperCAmelCase_ : Tuple = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 21 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : MutableSequence[float] ):
'''simple docstring'''
if len(UpperCAmelCase__ ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
lowercase : list[float] =list(UpperCAmelCase__ )
lowercase : Union[str, Any] =degree
def __add__( self : Any , UpperCAmelCase__ : Polynomial ):
'''simple docstring'''
if self.degree > polynomial_a.degree:
lowercase : Optional[int] =self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , UpperCAmelCase__ )
else:
lowercase : Dict =polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , UpperCAmelCase__ )
def __sub__( self : List[str] , UpperCAmelCase__ : Polynomial ):
'''simple docstring'''
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Any ):
'''simple docstring'''
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Optional[int] , UpperCAmelCase__ : Polynomial ):
'''simple docstring'''
lowercase : list[float] =[0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int | float ):
'''simple docstring'''
lowercase : int | float =0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : str ):
'''simple docstring'''
lowercase : Union[str, Any] =''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(UpperCAmelCase__ )
return polynomial
def __repr__( self : Dict ):
'''simple docstring'''
return self.__str__()
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : list[float] =[0] * self.degree
for i in range(self.degree ):
lowercase : Tuple =self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , UpperCAmelCase__ )
def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int | float = 0 ):
'''simple docstring'''
lowercase : list[float] =[0] * (self.degree + 2)
lowercase : str =constant
for i in range(self.degree + 1 ):
lowercase : Union[str, Any] =self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , UpperCAmelCase__ )
def __eq__( self : str , UpperCAmelCase__ : object ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Tuple , UpperCAmelCase__ : object ):
'''simple docstring'''
return not self.__eq__(UpperCAmelCase__ )
| 92 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__( self :Union[str, Any] ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __A :
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =sorted(set(self.languages ) ) if self.languages else None
__magic_name__ : Optional[int] =len(self.languages ) if self.languages else None
def __call__( self :List[str] ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =set(self.languages )
if self.languages and set(__snake_case ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__magic_name__ : Any =[]
for lang, text in translation_dict.items():
if isinstance(__snake_case , __snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__magic_name__ , __magic_name__ : List[str] =zip(*sorted(__snake_case ) )
return {"language": languages, "translation": translations}
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 21 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=9_9 , __UpperCAmelCase=0 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase="last" , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = parent
lowerCAmelCase__ :Optional[Any] = batch_size
lowerCAmelCase__ :Dict = seq_length
lowerCAmelCase__ :Optional[int] = is_training
lowerCAmelCase__ :Tuple = use_input_lengths
lowerCAmelCase__ :List[str] = use_token_type_ids
lowerCAmelCase__ :str = use_labels
lowerCAmelCase__ :List[str] = gelu_activation
lowerCAmelCase__ :List[Any] = sinusoidal_embeddings
lowerCAmelCase__ :Any = causal
lowerCAmelCase__ :Union[str, Any] = asm
lowerCAmelCase__ :int = n_langs
lowerCAmelCase__ :Any = vocab_size
lowerCAmelCase__ :List[Any] = n_special
lowerCAmelCase__ :int = hidden_size
lowerCAmelCase__ :Tuple = num_hidden_layers
lowerCAmelCase__ :str = num_attention_heads
lowerCAmelCase__ :str = hidden_dropout_prob
lowerCAmelCase__ :Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ :Dict = max_position_embeddings
lowerCAmelCase__ :Optional[int] = type_vocab_size
lowerCAmelCase__ :str = type_sequence_label_size
lowerCAmelCase__ :str = initializer_range
lowerCAmelCase__ :Optional[Any] = num_labels
lowerCAmelCase__ :List[Any] = num_choices
lowerCAmelCase__ :str = summary_type
lowerCAmelCase__ :Tuple = use_proj
lowerCAmelCase__ :Tuple = scope
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ :Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ :Union[str, Any] = None
if self.use_input_lengths:
lowerCAmelCase__ :Any = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase__ :str = None
if self.use_token_type_ids:
lowerCAmelCase__ :str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase__ :List[Any] = None
lowerCAmelCase__ :Tuple = None
lowerCAmelCase__ :str = None
if self.use_labels:
lowerCAmelCase__ :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase__ :List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ :Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = FlaubertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Any = model(__UpperCAmelCase , lengths=__UpperCAmelCase , langs=__UpperCAmelCase )
lowerCAmelCase__ :Dict = model(__UpperCAmelCase , langs=__UpperCAmelCase )
lowerCAmelCase__ :Dict = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = FlaubertWithLMHeadModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :str = FlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase )
lowerCAmelCase__ :int = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = FlaubertForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Any = model(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , p_mask=__UpperCAmelCase , )
lowerCAmelCase__ :Union[str, Any] = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , )
((lowerCAmelCase__) , ) :Dict = result_with_labels.to_tuple()
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
((lowerCAmelCase__) , ) :int = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = FlaubertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :Dict = model(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = self.num_labels
lowerCAmelCase__ :Dict = FlaubertForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.num_choices
lowerCAmelCase__ :Union[str, Any] = FlaubertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ :Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ :Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ :List[Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) :List[Any] = config_and_inputs
lowerCAmelCase__ :Union[str, Any] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__magic_name__ :List[str] = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCAmelCase__ :List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
lowerCAmelCase__ :Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = FlaubertModelTester(self )
lowerCAmelCase__ :Dict = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=3_7 )
def snake_case ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__UpperCAmelCase )
@slow
def snake_case ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ :Optional[int] = FlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@slow
@require_torch_gpu
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCAmelCase__ :List[str] = True
lowerCAmelCase__ :Optional[int] = model_class(config=__UpperCAmelCase )
lowerCAmelCase__ :Dict = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Tuple = torch.jit.trace(
__UpperCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , 'traced_model.pt' ) )
lowerCAmelCase__ :Optional[Any] = torch.jit.load(os.path.join(__UpperCAmelCase , 'traced_model.pt' ) , map_location=__UpperCAmelCase )
loaded(inputs_dict['input_ids'].to(__UpperCAmelCase ) , inputs_dict['attention_mask'].to(__UpperCAmelCase ) )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
lowerCAmelCase__ :Optional[int] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
with torch.no_grad():
lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase )[0]
lowerCAmelCase__ :Union[str, Any] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase__ :Tuple = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 93 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase_ : Dict = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase_ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase_ : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def A__ ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def A__ ( self :Tuple , __snake_case :str , __snake_case :Tuple , __snake_case :List[str]=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__snake_case , __snake_case , sample_weight=__snake_case ) ),
}
| 21 | 0 |
'''simple docstring'''
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if not isinstance(__A , __A ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__A , __A ) or not number >= 1:
raise ValueError(
'''starting number must be
and integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
lowercase : Optional[Any] =''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__A )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =OmegaConf.load(lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(lowerCamelCase ) ) )
return config
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
if conf_path is None:
__magic_name__ : List[str] ="""./model_checkpoints/vqgan_only.yaml"""
__magic_name__ : Dict =load_config(lowerCamelCase , display=lowerCamelCase )
__magic_name__ : Tuple =VQModel(**config.model.params )
if ckpt_path is None:
__magic_name__ : Optional[Any] ="""./model_checkpoints/vqgan_only.pt"""
__magic_name__ : Tuple =torch.load(lowerCamelCase , map_location=lowerCamelCase )
if ".ckpt" in ckpt_path:
__magic_name__ : Any =sd["""state_dict"""]
model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
model.to(lowerCamelCase )
del sd
return model
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] =model.encode(lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
__magic_name__ : List[Any] =model.decode(lowerCamelCase )
return xrec
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ , __magic_name__ : Optional[int] =string.rsplit(""".""" , 1 )
if reload:
__magic_name__ : Optional[int] =importlib.import_module(lowerCamelCase )
importlib.reload(lowerCamelCase )
return getattr(importlib.import_module(lowerCamelCase , package=lowerCamelCase ) , cls )
def lowerCAmelCase_ ( lowerCamelCase ):
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=True ):
__magic_name__ : str =instantiate_from_config(lowerCamelCase )
if sd is not None:
model.load_state_dict(lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# load the specified checkpoint
if ckpt:
__magic_name__ : str =torch.load(lowerCamelCase , map_location="""cpu""" )
__magic_name__ : Any =pl_sd["""global_step"""]
print(F"loaded model from global step {global_step}." )
else:
__magic_name__ : List[Any] ={"""state_dict""": None}
__magic_name__ : Optional[Any] =None
__magic_name__ : Tuple =load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=lowerCamelCase , eval_mode=lowerCamelCase )["""model"""]
return model, global_step
| 21 | 0 |
"""simple docstring"""
def snake_case ( A__ ,A__ ):
# Check if the input is valid
if not len(A__ ) == len(A__ ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = equationa
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = equationa
# Calculate the determinants of the matrices
UpperCAmelCase_ : int = aa * ba - aa * ba
UpperCAmelCase_ : Optional[int] = ca * ba - ca * ba
UpperCAmelCase_ : List[str] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
UpperCAmelCase_ : int = determinant_x / determinant
UpperCAmelCase_ : Optional[Any] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 95 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
def A__ ( self :Tuple ):
'''simple docstring'''
debug_launcher(test_script.main )
def A__ ( self :Dict ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 21 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = ["input_features"]
def __init__( self : str , __snake_case : List[Any]=8_0 , __snake_case : Dict=1_6_0_0_0 , __snake_case : Union[str, Any]=1_6_0 , __snake_case : Optional[int]=3_0 , __snake_case : List[str]=4_0_0 , __snake_case : Any=0.0 , __snake_case : int=False , **__snake_case : Optional[Any] , ) -> Optional[Any]:
super().__init__(
feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , return_attention_mask=__snake_case , **__snake_case , )
__magic_name__: List[str] = n_fft
__magic_name__: Dict = hop_length
__magic_name__: Optional[Any] = chunk_length
__magic_name__: List[Any] = chunk_length * sampling_rate
__magic_name__: List[Any] = self.n_samples // hop_length
__magic_name__: List[str] = sampling_rate
__magic_name__: str = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__snake_case , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__snake_case , norm="""slaney""" , mel_scale="""slaney""" , )
def lowerCamelCase__ ( self : str , __snake_case : np.array ) -> np.ndarray:
__magic_name__: Tuple = spectrogram(
__snake_case , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
__magic_name__: Optional[Any] = log_spec[:, :-1]
__magic_name__: int = np.maximum(__snake_case , log_spec.max() - 8.0 )
__magic_name__: Union[str, Any] = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCamelCase__ ( __snake_case : List[np.ndarray] , __snake_case : List[np.ndarray] , __snake_case : float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
__magic_name__: str = np.array(__snake_case , np.intaa )
__magic_name__: Dict = []
for vector, length in zip(__snake_case , attention_mask.sum(-1 ) ):
__magic_name__: Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
__magic_name__: Dict = padding_value
normed_input_values.append(__snake_case )
else:
__magic_name__: List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , __snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __snake_case : bool = True , __snake_case : Optional[int] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[str] = "max_length" , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , **__snake_case : Optional[Any] , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__magic_name__: Dict = isinstance(__snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
__magic_name__: Optional[Any] = is_batched_numpy or (
isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__magic_name__: List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__snake_case , np.ndarray ):
__magic_name__: List[str] = np.asarray(__snake_case , dtype=np.floataa )
elif isinstance(__snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__magic_name__: str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__magic_name__: str = [np.asarray([raw_speech] ).T]
__magic_name__: str = BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
__magic_name__: Optional[Any] = self.pad(
__snake_case , padding=__snake_case , max_length=max_length if max_length else self.n_samples , truncation=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__magic_name__: List[Any] = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
__magic_name__: List[Any] = np.stack(padded_inputs["""input_features"""] , axis=0 )
# make sure list is in array format
__magic_name__: int = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 )
__magic_name__: Union[str, Any] = [self._np_extract_fbank_features(__snake_case ) for waveform in input_features[0]]
if isinstance(input_features[0] , __snake_case ):
__magic_name__: Dict = [np.asarray(__snake_case , dtype=np.floataa ) for feature in input_features]
else:
__magic_name__: List[str] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
__magic_name__: Optional[Any] = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
__magic_name__: Optional[int] = padded_inputs.convert_to_tensors(__snake_case )
return padded_inputs
def lowerCamelCase__ ( self : List[str] ) -> Dict[str, Any]:
__magic_name__: Tuple = copy.deepcopy(self.__dict__ )
__magic_name__: Union[str, Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 96 |
UpperCAmelCase_ : Tuple = 0 # The first color of the flag.
UpperCAmelCase_ : Any = 1 # The second color of the flag.
UpperCAmelCase_ : str = 2 # The third color of the flag.
UpperCAmelCase_ : Tuple = (red, white, blue)
def lowerCAmelCase_ ( lowerCamelCase ):
if not sequence:
return []
if len(lowerCamelCase ) == 1:
return list(lowerCamelCase )
__magic_name__ : int =0
__magic_name__ : str =len(lowerCamelCase ) - 1
__magic_name__ : Optional[Any] =0
while mid <= high:
if sequence[mid] == colors[0]:
__magic_name__ , __magic_name__ : Tuple =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
__magic_name__ , __magic_name__ : Optional[Any] =sequence[high], sequence[mid]
high -= 1
else:
__magic_name__ : Optional[int] =F"The elements inside the sequence must contains only {colors} values"
raise ValueError(lowerCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[Any] = input("Enter numbers separated by commas:\n").strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 21 | 0 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__a = trt.Logger(trt.Logger.WARNING)
__a = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__a = logging.getLogger(__name__)
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__a = parser.parse_args()
if args.tokenizer_name:
__a = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__a = args.per_device_eval_batch_size
__a = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__a = True
__a = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__a = 'temp_engine/bert-fp16.engine'
if args.inta:
__a = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__a = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__a = [network.get_input(i) for i in range(network.num_inputs)]
__a = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__a = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__a = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__a = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def a ( snake_case__: str , snake_case__: List[str] , snake_case__: List[Any] , snake_case__: List[str] , snake_case__: Optional[int] , snake_case__: Optional[Any] , snake_case__: List[str] , snake_case__: List[Any] ):
'''simple docstring'''
lowercase_ = np.asarray(inputs['''input_ids'''] , dtype=np.intaa )
lowercase_ = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa )
lowercase_ = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , snake_case__ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , snake_case__ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , snake_case__ )
# start time
lowercase_ = time.time()
# Run inference
context.execute_async(
bindings=[int(snake_case__ ) for d_inp in d_inputs] + [int(snake_case__ ), int(snake_case__ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(snake_case__ , snake_case__ , snake_case__ )
cuda.memcpy_dtoh_async(snake_case__ , snake_case__ , snake_case__ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowercase_ = time.time()
lowercase_ = end_time - start_time
lowercase_ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__a = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__a = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__a = raw_datasets['validation'].column_names
__a = 'question' if 'question' in column_names else column_names[0]
__a = 'context' if 'context' in column_names else column_names[1]
__a = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__a = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
__a = min(args.max_seq_length, tokenizer.model_max_length)
def a ( snake_case__: Tuple ):
'''simple docstring'''
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lots of space). So we remove that
# left whitespace
lowercase_ = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowercase_ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=snake_case__ , stride=args.doc_stride , return_overflowing_tokens=snake_case__ , return_offsets_mapping=snake_case__ , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowercase_ = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowercase_ = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowercase_ = tokenized_examples.sequence_ids(snake_case__ )
lowercase_ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowercase_ = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowercase_ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
__a = raw_datasets['validation']
# Validation Feature Creation
__a = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__a = default_data_collator
__a = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__a = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def a ( snake_case__: Tuple , snake_case__: int , snake_case__: Union[str, Any] , snake_case__: Optional[int]="eval" ):
'''simple docstring'''
# Post-processing: we match the start logits and end logits to answers in the original context.
lowercase_ = postprocess_qa_predictions(
examples=snake_case__ , features=snake_case__ , predictions=snake_case__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=snake_case__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowercase_ = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
lowercase_ = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
lowercase_ = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=snake_case__ , label_ids=snake_case__ )
__a = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def a ( snake_case__: Dict ):
'''simple docstring'''
return trt.volume(engine.get_binding_shape(snake_case__ ) ) * engine.get_binding_dtype(snake_case__ ).itemsize
# Allocate device memory for inputs and outputs.
__a = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__a = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__a = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__a = cuda.mem_alloc(h_outputa.nbytes)
__a = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__a = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
__a = 0.0
__a = 0
__a = timeit.default_timer()
__a = None
for step, batch in enumerate(eval_dataloader):
__a , __a = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__a , __a = outputs
__a = torch.tensor(start_logits)
__a = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__a = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
__a = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
__a = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__a = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
__a = nested_truncate(all_preds, len(eval_dataset))
__a = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
__a = post_processing_function(eval_examples, eval_dataset, all_preds)
__a = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
| 97 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = 1
@register_to_config
def __init__( self :Any , __snake_case :Tuple=20_00 , __snake_case :Optional[Any]=0.1 , __snake_case :Any=20 , __snake_case :Optional[int]=1E-3 ):
'''simple docstring'''
__magic_name__ : Dict =None
__magic_name__ : List[str] =None
__magic_name__ : str =None
def A__ ( self :Dict , __snake_case :Optional[int] , __snake_case :Union[str, torch.device] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =torch.linspace(1 , self.config.sampling_eps , __snake_case , device=__snake_case )
def A__ ( self :List[str] , __snake_case :List[str] , __snake_case :int , __snake_case :int , __snake_case :List[str]=None ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__magic_name__ : int =(
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__magic_name__ : Optional[int] =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__magic_name__ : str =std.flatten()
while len(std.shape ) < len(score.shape ):
__magic_name__ : List[str] =std.unsqueeze(-1 )
__magic_name__ : Union[str, Any] =-score / std
# compute
__magic_name__ : Tuple =-1.0 / len(self.timesteps )
__magic_name__ : int =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__magic_name__ : Dict =beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__magic_name__ : Any =beta_t.unsqueeze(-1 )
__magic_name__ : Dict =-0.5 * beta_t * x
__magic_name__ : Optional[int] =torch.sqrt(__snake_case )
__magic_name__ : int =drift - diffusion**2 * score
__magic_name__ : List[str] =x + drift * dt
# add noise
__magic_name__ : Optional[int] =randn_tensor(x.shape , layout=x.layout , generator=__snake_case , device=x.device , dtype=x.dtype )
__magic_name__ : Optional[Any] =x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 21 | 0 |
'''simple docstring'''
def a__ ( lowercase : list[int] ) -> int:
"""simple docstring"""
if not numbers:
return 0
if not isinstance(lowercase, (list, tuple) ) or not all(
isinstance(lowercase, lowercase ) for number in numbers ):
raise ValueError('''numbers must be an iterable of integers''' )
_UpperCamelCase = _UpperCamelCase = _UpperCamelCase = numbers[0]
for i in range(1, len(lowercase ) ):
# update the maximum and minimum subarray products
_UpperCamelCase = numbers[i]
if number < 0:
_UpperCamelCase , _UpperCamelCase = min_till_now, max_till_now
_UpperCamelCase = max(lowercase, max_till_now * number )
_UpperCamelCase = min(lowercase, min_till_now * number )
# update the maximum product found till now
_UpperCamelCase = max(lowercase, lowercase )
return max_prod
| 98 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__( self :List[str] , __snake_case :int , __snake_case :int , __snake_case :float , **__snake_case :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =feature_size
__magic_name__ : Union[str, Any] =sampling_rate
__magic_name__ : List[Any] =padding_value
__magic_name__ : List[str] =kwargs.pop("""padding_side""" , """right""" )
__magic_name__ : Tuple =kwargs.pop("""return_attention_mask""" , __snake_case )
super().__init__(**__snake_case )
def A__ ( self :Any , __snake_case :Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case :Union[bool, str, PaddingStrategy] = True , __snake_case :Optional[int] = None , __snake_case :bool = False , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__magic_name__ : Union[str, Any] ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
__magic_name__ : int =processed_features[self.model_input_names[0]]
__magic_name__ : Union[str, Any] =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
__magic_name__ : List[str] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__magic_name__ : Optional[int] =required_input[0]
if isinstance(__snake_case , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__magic_name__ : Optional[Any] =0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
__magic_name__ : List[str] =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
__magic_name__ : int ="""tf"""
elif is_torch_tensor(__snake_case ):
__magic_name__ : str ="""pt"""
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
__magic_name__ : List[Any] ="""np"""
else:
raise ValueError(
f"type of {first_element} unknown: {type(__snake_case )}. "
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__magic_name__ : List[str] =to_numpy(__snake_case )
else:
__magic_name__ : str =[to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
__magic_name__ : Dict =self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
__magic_name__ : Optional[Any] =processed_features[self.model_input_names[0]]
__magic_name__ : Dict =len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__magic_name__ : Optional[int] =[]
for i in range(__snake_case ):
__magic_name__ : Any ={k: v[i] for k, v in processed_features.items()}
# truncation
__magic_name__ : List[str] =self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__magic_name__ : Optional[int] =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__magic_name__ : Tuple =PaddingStrategy.MAX_LENGTH
__magic_name__ : str ={}
for i in range(__snake_case ):
# padding
__magic_name__ : List[str] =self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
__magic_name__ : Dict =[]
if value.dtype is np.dtype(np.floataa ):
__magic_name__ : Optional[int] =value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def A__ ( self :Any , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
__magic_name__ : Dict =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__magic_name__ : Any =len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : Dict =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : List[Any] =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__magic_name__ : int =np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
__magic_name__ : List[Any] =max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__magic_name__ : Tuple =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__magic_name__ : str =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__magic_name__ : Optional[int] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__magic_name__ : List[Any] =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def A__ ( self :Optional[Any] , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__magic_name__ : Union[str, Any] =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : List[str] =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : Any =len(__snake_case ) > max_length
if needs_to_be_truncated:
__magic_name__ : List[Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__magic_name__ : List[str] =processed_features["""attention_mask"""][:max_length]
return processed_features
def A__ ( self :List[Any] , __snake_case :str=False , __snake_case :Optional[int]=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
__magic_name__ : Union[str, Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
__magic_name__ : Optional[int] =PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
__magic_name__ : Any =padding
else:
__magic_name__ : Any =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __A ( nn.Module ):
def __init__( self :List[Any] ):
'''simple docstring'''
super().__init__()
__magic_name__ : Tuple =nn.Linear(3 , 4 )
__magic_name__ : Union[str, Any] =nn.BatchNormad(4 )
__magic_name__ : List[str] =nn.Linear(4 , 5 )
def A__ ( self :Dict , __snake_case :Tuple ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__snake_case ) ) )
class __A ( UpperCamelCase__ ):
def A__ ( self :Any , __snake_case :Optional[Any] , *__snake_case :List[Any] , **__snake_case :Any ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class __A ( UpperCamelCase__ ):
def A__ ( self :List[str] , __snake_case :Tuple , __snake_case :Union[str, Any] ):
'''simple docstring'''
return output + 1
class __A ( unittest.TestCase ):
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : Tuple =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
self.assertEqual(test_model._hf_hook , __snake_case )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : List[str] =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
add_hook_to_module(__snake_case , __snake_case , append=__snake_case )
self.assertEqual(isinstance(test_model._hf_hook , __snake_case ) , __snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
__magic_name__ : Any =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(x + 1 )
__magic_name__ : Optional[Any] =test_model(x + 2 )
__magic_name__ : int =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : int =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : str =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : List[str] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Optional[Any] =SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
assert torch.allclose(__snake_case , __snake_case , atol=1E-5 )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
__magic_name__ : Dict =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(__snake_case )
__magic_name__ : Dict =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Optional[int] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Union[str, Any] =SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
assert torch.allclose(__snake_case , output + 2 , atol=1E-5 )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Tuple =ModelForTest()
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Dict =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__magic_name__ : Any =True
__magic_name__ : Any =test_model(__snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[Any] =model(__snake_case )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__snake_case , AlignDevicesHook(io_same_device=__snake_case ) )
__magic_name__ : int =torch.randn(2 , 3 ).to(0 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , torch.device(0 ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : int ={"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[int] =torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : Union[str, Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__magic_name__ : Tuple ={
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Tuple =torch.randn(2 , 3 )
__magic_name__ : int =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : str =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case , offload_buffers=__snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Optional[int] =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Dict =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : List[str] =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Any =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() , offload_buffers=__snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 21 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __snake_case :
'''simple docstring'''
def __init__( self , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = 13
SCREAMING_SNAKE_CASE__ = 7
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 99
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 0.1
SCREAMING_SNAKE_CASE__ = 0.1
SCREAMING_SNAKE_CASE__ = 5_12
SCREAMING_SNAKE_CASE__ = 16
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 0.02
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = '''last'''
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = 0
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertModel(config=A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
SCREAMING_SNAKE_CASE__ = model(A_ )
SCREAMING_SNAKE_CASE__ = [input_ids, input_mask]
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertWithLMHeadModel(A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertForQuestionAnsweringSimple(A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertForSequenceClassification(A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''lengths''': input_lengths}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = TFFlaubertForTokenClassification(config=A_ )
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.num_choices
SCREAMING_SNAKE_CASE__ = TFFlaubertForMultipleChoice(config=A_ )
SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Union[str, Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCamelCase__ : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : Optional[Any] = False
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ , emb_dim=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
SCREAMING_SNAKE_CASE__ = model(A_ )[0]
SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 100 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = KandinskyInpaintPipeline
UpperCamelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCamelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return 1_00
@property
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : str =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__magic_name__ : Tuple =MultilingualCLIP(__snake_case )
__magic_name__ : Optional[int] =text_encoder.eval()
return text_encoder
@property
def A__ ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Optional[Any] ={
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ : Union[str, Any] =UNetaDConditionModel(**__snake_case )
return model
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.dummy_text_encoder
__magic_name__ : Optional[Any] =self.dummy_tokenizer
__magic_name__ : Optional[Any] =self.dummy_unet
__magic_name__ : Tuple =self.dummy_movq
__magic_name__ : List[str] =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__snake_case , )
__magic_name__ : str ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self :str , __snake_case :Optional[Any] , __snake_case :int=0 ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : Dict =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
__magic_name__ : str =floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ : str =Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
__magic_name__ : Dict =np.ones((64, 64) , dtype=np.floataa )
__magic_name__ : Any =0
if str(__snake_case ).startswith("""mps""" ):
__magic_name__ : Dict =torch.manual_seed(__snake_case )
else:
__magic_name__ : Tuple =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__ : List[Any] ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple ="""cpu"""
__magic_name__ : List[Any] =self.get_dummy_components()
__magic_name__ : Union[str, Any] =self.pipeline_class(**__snake_case )
__magic_name__ : Tuple =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Tuple =pipe(**self.get_dummy_inputs(__snake_case ) )
__magic_name__ : List[Any] =output.images
__magic_name__ : Any =pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__magic_name__ : int =image[0, -3:, -3:, -1]
__magic_name__ : str =image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Optional[Any] =np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def A__ ( self :Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def A__ ( self :List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
__magic_name__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__magic_name__ : List[Any] =np.ones((7_68, 7_68) , dtype=np.floataa )
__magic_name__ : Any =0
__magic_name__ : int ="""a hat"""
__magic_name__ : int =KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__magic_name__ : Dict =KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
__magic_name__ : int =pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Union[str, Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ , __magic_name__ : Dict =pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__magic_name__ : Optional[Any] =pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
__magic_name__ : Optional[int] =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 21 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __lowercase (unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = StableDiffusionLDMaDPipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
SCREAMING_SNAKE_CASE_ : int = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
SCREAMING_SNAKE_CASE_ : Any = CLIPTextModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ):
"""simple docstring"""
if str(lowerCAmelCase__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE_ : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Tuple = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = ldmad_pipe(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ : Optional[Any] = rgb[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Any = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
SCREAMING_SNAKE_CASE_ : List[Any] = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE_ : str = ldmad_pipe(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ : Optional[Any] = rgb_slice_a[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : List[str] = depth_slice_a[0, -3:, -1]
SCREAMING_SNAKE_CASE_ : int = self.get_dummy_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE_ : List[str] = ldmad_pipe.tokenizer(
lowerCAmelCase__ , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ : str = text_inputs['input_ids'].to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = ldmad_pipe.text_encoder(lowerCAmelCase__ )[0]
SCREAMING_SNAKE_CASE_ : List[str] = prompt_embeds
# forward
SCREAMING_SNAKE_CASE_ : Tuple = ldmad_pipe(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ : str = rgb_slice_a[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : List[Any] = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Tuple = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = 'french fries'
SCREAMING_SNAKE_CASE_ : str = ldmad_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ : List[Any] = rgb[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
SCREAMING_SNAKE_CASE_ : List[Any] = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__="cpu" , lowerCAmelCase__=torch.floataa , lowerCAmelCase__=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = ldmad_pipe(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ : Union[str, Any] = rgb[0, -3:, -3:, -1].flatten()
SCREAMING_SNAKE_CASE_ : Optional[Any] = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
SCREAMING_SNAKE_CASE_ : int = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
SCREAMING_SNAKE_CASE_ : List[Any] = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__="cpu" , lowerCAmelCase__=torch.floataa , lowerCAmelCase__=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 5_0,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = self.get_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.495_586
SCREAMING_SNAKE_CASE_ : str = 0.33_795_515
SCREAMING_SNAKE_CASE_ : Optional[int] = 112.48_518
SCREAMING_SNAKE_CASE_ : Optional[Any] = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_inputs(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = ldmad_pipe(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ : Dict = 0.4_194_127
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.35_375_586
SCREAMING_SNAKE_CASE_ : int = 0.5_638_502
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.34_686_103
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 101 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A :
def __init__( self :int , __snake_case :List[Any] , __snake_case :List[Any]=2 , __snake_case :Dict=True , __snake_case :Tuple=False , __snake_case :List[str]=10 , __snake_case :List[str]=3 , __snake_case :Union[str, Any]=32 * 8 , __snake_case :Optional[int]=32 * 8 , __snake_case :Any=4 , __snake_case :Union[str, Any]=64 , ):
'''simple docstring'''
__magic_name__ : Optional[int] =parent
__magic_name__ : List[Any] =batch_size
__magic_name__ : List[str] =is_training
__magic_name__ : List[str] =use_auxiliary_loss
__magic_name__ : Union[str, Any] =num_queries
__magic_name__ : str =num_channels
__magic_name__ : Union[str, Any] =min_size
__magic_name__ : Union[str, Any] =max_size
__magic_name__ : Optional[int] =num_labels
__magic_name__ : Tuple =hidden_dim
__magic_name__ : Any =hidden_dim
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
__magic_name__ : List[Any] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
__magic_name__ : List[str] =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
__magic_name__ : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
__magic_name__ : str =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Dict =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__magic_name__ : str =self.num_queries
__magic_name__ : Dict =self.num_labels
__magic_name__ : int =[1, 1, 1, 1]
__magic_name__ : List[str] =self.num_channels
__magic_name__ : str =64
__magic_name__ : List[str] =1_28
__magic_name__ : Optional[Any] =self.hidden_dim
__magic_name__ : Tuple =self.hidden_dim
__magic_name__ : Optional[int] =self.hidden_dim
return config
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =self.prepare_config_and_inputs()
__magic_name__ : Optional[Any] ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self :Union[str, Any] , __snake_case :Tuple , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : int =output.encoder_hidden_states
__magic_name__ : List[str] =output.pixel_decoder_hidden_states
__magic_name__ : int =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , config.decoder_layers )
def A__ ( self :List[Any] , __snake_case :Optional[Any] , __snake_case :int , __snake_case :str , __snake_case :str=False ):
'''simple docstring'''
with torch.no_grad():
__magic_name__ : List[str] =MaskaFormerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Union[str, Any] =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : int =model(__snake_case , output_hidden_states=__snake_case )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__snake_case , __snake_case )
def A__ ( self :Optional[Any] , __snake_case :List[str] , __snake_case :List[Any] , __snake_case :int , __snake_case :Any , __snake_case :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : str =MaskaFormerForUniversalSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
def comm_check_on_output(__snake_case :List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__magic_name__ : int =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : List[str] =model(__snake_case )
comm_check_on_output(__snake_case )
__magic_name__ : Any =model(
pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
comm_check_on_output(__snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Any =MaskaFormerModelTester(self )
__magic_name__ : Union[str, Any] =ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def A__ ( self :Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__snake_case )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple =model_class(__snake_case )
__magic_name__ : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple =[*signature.parameters.keys()]
__magic_name__ : Optional[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__magic_name__ : int =MaskaFormerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =(self.model_tester.min_size,) * 2
__magic_name__ : Union[str, Any] ={
"""pixel_values""": torch.randn((2, 3, *size) , device=__snake_case ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__snake_case ),
"""class_labels""": torch.zeros(2 , 10 , device=__snake_case ).long(),
}
__magic_name__ : Optional[Any] =self.model_tester.get_config()
__magic_name__ : Dict =MaskaFormerForUniversalSegmentation(__snake_case ).to(__snake_case )
__magic_name__ : Any =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] =model_class(__snake_case ).to(__snake_case )
__magic_name__ : Optional[int] =model(**__snake_case , output_attentions=__snake_case )
self.assertTrue(outputs.attentions is not None )
def A__ ( self :int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__magic_name__ : List[Any] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Dict =model_class(__snake_case )
model.to(__snake_case )
model.train()
__magic_name__ : Optional[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss
loss.backward()
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Tuple =True
__magic_name__ : Optional[int] =True
__magic_name__ : int =model_class(__snake_case ).to(__snake_case )
model.train()
__magic_name__ : List[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
__magic_name__ : Optional[int] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__magic_name__ : Optional[int] =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ : Dict = 1e-4
def lowerCAmelCase_ ( ):
__magic_name__ : Dict =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __A ( unittest.TestCase ):
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__snake_case )
__magic_name__ : int =self.default_image_processor
__magic_name__ : List[Any] =prepare_img()
__magic_name__ : Any =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Dict =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : List[str] =model(**__snake_case )
__magic_name__ : Any =torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Dict =torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Any =torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Optional[int] =self.default_image_processor
__magic_name__ : Tuple =prepare_img()
__magic_name__ : List[Any] =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Union[str, Any] =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : str =model(**__snake_case )
# masks_queries_logits
__magic_name__ : List[Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__magic_name__ : List[Any] =[
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__magic_name__ : Dict =torch.tensor(__snake_case ).to(__snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
# class_queries_logits
__magic_name__ : Any =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__magic_name__ : int =torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Any =self.default_image_processor
__magic_name__ : Union[str, Any] =image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , )
__magic_name__ : str =inputs["""pixel_values"""].to(__snake_case )
__magic_name__ : Tuple =[el.to(__snake_case ) for el in inputs["""mask_labels"""]]
__magic_name__ : Union[str, Any] =[el.to(__snake_case ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__magic_name__ : Dict =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
| 21 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ : int = logging.get_logger(__name__)
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase : Any = ["""input_features""", """is_longer"""]
def __init__( self , _A=6_4 , _A=4_8_0_0_0 , _A=4_8_0 , _A=1_0 , _A=1_0_2_4 , _A=0.0 , _A=False , _A = 0 , _A = 1_4_0_0_0 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ):
'''simple docstring'''
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
UpperCamelCase : Optional[int] = top_db
UpperCamelCase : Tuple = truncation
UpperCamelCase : str = padding
UpperCamelCase : str = fft_window_size
UpperCamelCase : int = (fft_window_size >> 1) + 1
UpperCamelCase : Optional[int] = hop_length
UpperCamelCase : Tuple = max_length_s
UpperCamelCase : Optional[Any] = max_length_s * sampling_rate
UpperCamelCase : List[str] = sampling_rate
UpperCamelCase : List[Any] = frequency_min
UpperCamelCase : Union[str, Any] = frequency_max
UpperCamelCase : Optional[int] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale="""htk""" , )
UpperCamelCase : str = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm="""slaney""" , mel_scale="""slaney""" , )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Tuple = copy.deepcopy(self.__dict__ )
UpperCamelCase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self , _A , _A = None ):
'''simple docstring'''
UpperCamelCase : Dict = spectrogram(
_A , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel="""dB""" , )
return log_mel_spectrogram.T
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : int = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCamelCase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
UpperCamelCase : Union[str, Any] = [0]
# randomly choose index for each part
UpperCamelCase : Optional[int] = np.random.choice(ranges[0] )
UpperCamelCase : List[Any] = np.random.choice(ranges[1] )
UpperCamelCase : int = np.random.choice(ranges[2] )
UpperCamelCase : str = mel[idx_front : idx_front + chunk_frames, :]
UpperCamelCase : List[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
UpperCamelCase : int = mel[idx_back : idx_back + chunk_frames, :]
UpperCamelCase : int = torch.tensor(mel[None, None, :] )
UpperCamelCase : Union[str, Any] = torch.nn.functional.interpolate(
_A , size=[chunk_frames, 6_4] , mode="""bilinear""" , align_corners=_A )
UpperCamelCase : List[Any] = mel_shrink[0][0].numpy()
UpperCamelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _a ( self , _A , _A , _A , _A ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
UpperCamelCase : Any = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
UpperCamelCase : Union[str, Any] = len(_A ) - max_length
UpperCamelCase : Optional[int] = np.random.randint(0 , overflow + 1 )
UpperCamelCase : Optional[int] = waveform[idx : idx + max_length]
UpperCamelCase : Union[str, Any] = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
UpperCamelCase : List[Any] = self._np_extract_fbank_features(_A , self.mel_filters )
UpperCamelCase : Union[str, Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
UpperCamelCase : Any = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
UpperCamelCase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
UpperCamelCase : Optional[Any] = False
else:
UpperCamelCase : Optional[int] = self._random_mel_fusion(_A , _A , _A )
UpperCamelCase : Any = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
UpperCamelCase : Union[str, Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
UpperCamelCase : int = int(max_length / len(_A ) )
UpperCamelCase : Tuple = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
UpperCamelCase : Any = int(max_length / len(_A ) )
UpperCamelCase : str = np.stack(np.tile(_A , _A ) )
UpperCamelCase : Optional[int] = np.pad(_A , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
if truncation == "fusion":
UpperCamelCase : List[Any] = self._np_extract_fbank_features(_A , self.mel_filters )
UpperCamelCase : Any = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
UpperCamelCase : Union[str, Any] = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ):
'''simple docstring'''
UpperCamelCase : str = truncation if truncation is not None else self.truncation
UpperCamelCase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCamelCase : int = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : int = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : Any = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
UpperCamelCase : Any = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Optional[int] = [np.asarray(_A )]
# convert to mel spectrogram, truncate and pad if needed.
UpperCamelCase : List[str] = [
self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
for waveform in raw_speech
]
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : Optional[int] = []
for mel, longer in padded_inputs:
input_mel.append(_A )
is_longer.append(_A )
if truncation == "fusion" and sum(_A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
UpperCamelCase : int = np.random.randint(0 , len(_A ) )
UpperCamelCase : List[str] = True
if isinstance(input_mel[0] , _A ):
UpperCamelCase : Dict = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
UpperCamelCase : Optional[int] = [[longer] for longer in is_longer]
UpperCamelCase : Union[str, Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
UpperCamelCase : List[Any] = BatchFeature(_A )
if return_tensors is not None:
UpperCamelCase : str = input_features.convert_to_tensors(_A )
return input_features
| 102 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """segformer"""
def __init__( self :List[str] , __snake_case :str=3 , __snake_case :Optional[Any]=4 , __snake_case :List[Any]=[2, 2, 2, 2] , __snake_case :Dict=[8, 4, 2, 1] , __snake_case :Optional[int]=[32, 64, 1_60, 2_56] , __snake_case :Union[str, Any]=[7, 3, 3, 3] , __snake_case :Optional[Any]=[4, 2, 2, 2] , __snake_case :Tuple=[1, 2, 5, 8] , __snake_case :List[Any]=[4, 4, 4, 4] , __snake_case :Optional[Any]="gelu" , __snake_case :Tuple=0.0 , __snake_case :Dict=0.0 , __snake_case :Optional[int]=0.1 , __snake_case :Optional[int]=0.02 , __snake_case :Tuple=0.1 , __snake_case :Union[str, Any]=1E-6 , __snake_case :int=2_56 , __snake_case :Optional[int]=2_55 , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __snake_case , )
__magic_name__ : Dict =num_channels
__magic_name__ : str =num_encoder_blocks
__magic_name__ : List[Any] =depths
__magic_name__ : Optional[Any] =sr_ratios
__magic_name__ : List[str] =hidden_sizes
__magic_name__ : List[str] =patch_sizes
__magic_name__ : Any =strides
__magic_name__ : Optional[Any] =mlp_ratios
__magic_name__ : str =num_attention_heads
__magic_name__ : int =hidden_act
__magic_name__ : List[Any] =hidden_dropout_prob
__magic_name__ : Optional[Any] =attention_probs_dropout_prob
__magic_name__ : Optional[Any] =classifier_dropout_prob
__magic_name__ : List[str] =initializer_range
__magic_name__ : List[str] =drop_path_rate
__magic_name__ : List[Any] =layer_norm_eps
__magic_name__ : List[str] =decoder_hidden_size
__magic_name__ : Union[str, Any] =kwargs.get("""reshape_last_stage""" , __snake_case )
__magic_name__ : Dict =semantic_loss_ignore_index
class __A ( UpperCamelCase__ ):
UpperCamelCase = version.parse("""1.11""" )
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self :Any ):
'''simple docstring'''
return 1E-4
@property
def A__ ( self :int ):
'''simple docstring'''
return 12
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ['''ViTFeatureExtractor''']
snake_case = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 103 |
import heapq
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : list[list] =[]
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase , [-1 * len(lowerCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
__magic_name__ : Tuple =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__magic_name__ : Tuple =heapq.heappop(lowerCamelCase )[1][0]
chosen_vertices.add(lowerCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
__magic_name__ : Tuple =elem[1][1].index(lowerCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 104 |
UpperCAmelCase_ : int = range(2, 20 + 1)
UpperCAmelCase_ : Tuple = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
__magic_name__ : Any =sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
__magic_name__ , __magic_name__ : Tuple =0, 0
__magic_name__ : Optional[Any] =n - i
__magic_name__ : Union[str, Any] =memo.get(lowerCamelCase )
if sub_memo is not None:
__magic_name__ : int =sub_memo.get(lowerCamelCase )
if jumps is not None and len(lowerCamelCase ) > 0:
# find and make the largest jump without going over
__magic_name__ : Dict =-1
for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__magic_name__ : Optional[Any] =_k
break
if max_jump >= 0:
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =jumps[max_jump]
# since the difference between jumps is cached, add c
__magic_name__ : Tuple =diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
__magic_name__ , __magic_name__ : Tuple =divmod(lowerCamelCase , 10 )
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__magic_name__ : str =[]
else:
__magic_name__ : List[str] ={c: []}
__magic_name__ : List[str] =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__magic_name__ , __magic_name__ : Union[str, Any] =next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__magic_name__ , __magic_name__ : Optional[int] =compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
__magic_name__ : Tuple =sub_memo[c]
# keep jumps sorted by # of terms skipped
__magic_name__ : List[Any] =0
while j < len(lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if i >= n:
return 0, i
if k > len(lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__magic_name__ : Tuple =i
__magic_name__ , __magic_name__ , __magic_name__ : Tuple =0, 0, 0
for j in range(len(lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__magic_name__ : Optional[Any] =ds_c + ds_b
diff += addend
__magic_name__ : str =0
for j in range(lowerCamelCase ):
__magic_name__ : int =a_i[j] + addend
__magic_name__ , __magic_name__ : Any =divmod(lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return diff, i - start_i
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for j in range(lowerCamelCase , len(lowerCamelCase ) ):
__magic_name__ : Tuple =digits[j] + addend
if s >= 10:
__magic_name__ , __magic_name__ : int =divmod(lowerCamelCase , 10 )
__magic_name__ : int =addend // 10 + quotient
else:
__magic_name__ : Dict =s
__magic_name__ : Any =addend // 10
if addend == 0:
break
while addend > 0:
__magic_name__ , __magic_name__ : Union[str, Any] =divmod(lowerCamelCase , 10 )
digits.append(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase = 10**15 ):
__magic_name__ : List[str] =[1]
__magic_name__ : str =1
__magic_name__ : str =0
while True:
__magic_name__ , __magic_name__ : List[str] =next_term(lowerCamelCase , 20 , i + dn , lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
__magic_name__ : int =0
for j in range(len(lowerCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' ,revision='bf16' ,dtype=jnp.bfloataa ,)
SCREAMING_SNAKE_CASE_ : List[str] = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ : str = jax.device_count()
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ : Any = sd_pipe.prepare_inputs(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = replicate(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = shard(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ : Dict = jax.random.split(snake_case__ ,jax.device_count() )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd_pipe(snake_case__ ,snake_case__ ,snake_case__ ,num_inference_steps=25 ,jit=snake_case__ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_ : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_ : int = images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_ : List[str] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 'stabilityai/stable-diffusion-2'
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(snake_case__ ,subfolder='scheduler' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
snake_case__ ,scheduler=snake_case__ ,revision='bf16' ,dtype=jnp.bfloataa ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_params
SCREAMING_SNAKE_CASE_ : Tuple = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ : Optional[Any] = jax.device_count()
SCREAMING_SNAKE_CASE_ : Tuple = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ : List[str] = sd_pipe.prepare_inputs(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = replicate(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = shard(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ : str = jax.random.split(snake_case__ ,jax.device_count() )
SCREAMING_SNAKE_CASE_ : Dict = sd_pipe(snake_case__ ,snake_case__ ,snake_case__ ,num_inference_steps=25 ,jit=snake_case__ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_ : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_ : Tuple = images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 105 |
from typing import List
from .keymap import KEYMAP, get_character
def lowerCAmelCase_ ( lowerCamelCase ):
def decorator(lowerCamelCase ):
__magic_name__ : str =getattr(lowerCamelCase , """handle_key""" , [] )
handle += [key]
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
def lowerCAmelCase_ ( *lowerCamelCase ):
def decorator(lowerCamelCase ):
__magic_name__ : Dict =getattr(lowerCamelCase , """handle_key""" , [] )
handle += keys
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
class __A ( UpperCamelCase__ ):
def __new__( cls :Dict , __snake_case :Optional[Any] , __snake_case :Union[str, Any] , __snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : int =super().__new__(cls , __snake_case , __snake_case , __snake_case )
if not hasattr(__snake_case , """key_handler""" ):
setattr(__snake_case , """key_handler""" , {} )
setattr(__snake_case , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__magic_name__ : int =getattr(__snake_case , """handle_key""" , [] )
for key in handled_keys:
__magic_name__ : List[str] =value
return new_cls
@staticmethod
def A__ ( cls :Optional[int] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =get_character()
if char != KEYMAP["undefined"]:
__magic_name__ : Optional[int] =ord(__snake_case )
__magic_name__ : int =cls.key_handler.get(__snake_case )
if handler:
__magic_name__ : Dict =char
return handler(cls )
else:
return None
def lowerCAmelCase_ ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case :List[Any] ={
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] =[
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Optional[Any] =[
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__snake_case :Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 106 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCAmelCase_ : Dict = 2048
UpperCAmelCase_ : int = 4096
UpperCAmelCase_ : Any = 42
UpperCAmelCase_ : Optional[int] = os.environ.pop("PROCESS_TRAIN", "false")
UpperCAmelCase_ : str = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def lowerCAmelCase_ ( lowerCamelCase ):
def choose_first(lowerCamelCase , lowerCamelCase=False ):
assert isinstance(lowerCamelCase , lowerCamelCase )
if len(lowerCamelCase ) == 1:
__magic_name__ : List[str] =answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__magic_name__ : Tuple ={k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
__magic_name__ : str ={"""id""": example["""id"""]}
__magic_name__ : List[Any] =example["""annotations"""]
__magic_name__ : List[str] =annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
__magic_name__ : Optional[int] =["""yes"""] if 1 in yes_no_answer else ["""no"""]
__magic_name__ : List[str] =[]
__magic_name__ : Dict =[]
__magic_name__ : str =["""<cls>"""]
else:
__magic_name__ : Tuple =["""short"""]
__magic_name__ : Optional[int] =choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
__magic_name__ : Tuple =["""long"""]
__magic_name__ : Tuple =choose_first(annotation["""long_answer"""] , is_long_answer=lowerCamelCase )
__magic_name__ : List[Any] =[]
answer.update(lowerCamelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
__magic_name__ : Any =True
else:
__magic_name__ : List[str] =False
__magic_name__ : int =["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , lowerCamelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =_get_single_answer(lowerCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : Any =example["""document"""]["""tokens"""]
__magic_name__ : str =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__magic_name__ : Dict =["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__magic_name__ : Tuple =example["""document"""]["""tokens"""]
__magic_name__ : Optional[int] =answer["""start_token"""]
__magic_name__ : List[Any] =answer["""end_token"""]
__magic_name__ : Optional[Any] =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__magic_name__ : Optional[int] =""" """.join(context[start_token:end_token] )
# checking above code
if assertion:
__magic_name__ : List[str] =doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : str =doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : Dict =""" """.join([old[i] for i in range(len(lowerCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , lowerCamelCase , end="""\n""" )
print("""Old:""" , lowerCamelCase , end="""\n\n""" )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=True ):
# overlap will be of doc_stride - q_len
__magic_name__ : Any =get_context_and_ans(lowerCamelCase , assertion=lowerCamelCase )
__magic_name__ : Union[str, Any] =out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__magic_name__ : List[Any] =tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
__magic_name__ : Dict =input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : List[str] =[]
__magic_name__ : int =[]
__magic_name__ : List[str] =input_ids[:q_len]
__magic_name__ : Dict =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Tuple =input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowerCamelCase ),
"end_token": [-100] * len(lowerCamelCase ),
"category": category,
},
}
__magic_name__ : int =out["""context"""].split()
__magic_name__ : Any =splitted_context[answer["""end_token"""]]
__magic_name__ : str =len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=lowerCamelCase , ).input_ids )
__magic_name__ : Optional[int] =len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=lowerCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__magic_name__ : Union[str, Any] =len(tokenizer(lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__magic_name__ : str =input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
__magic_name__ : Dict =answer["""start_token"""]
__magic_name__ : int =answer["""end_token"""]
if assertion:
__magic_name__ : Any =tokenizer.decode(lowerCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , lowerCamelCase , end="""\n\n""" )
if len(lowerCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__magic_name__ : Any =input_ids[:q_len]
__magic_name__ : Union[str, Any] =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
__magic_name__ : Any =[]
__magic_name__ : List[str] =[]
__magic_name__ : List[str] =[]
__magic_name__ : str =[] # null, yes, no, long, short
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Dict =input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__magic_name__ : List[Any] =start_token - i + q_len
__magic_name__ : Optional[Any] =end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
__magic_name__ : Optional[Any] =-100
__magic_name__ : Optional[Any] =-100
answers_category.append("""null""" )
__magic_name__ : Optional[int] =inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase )
answers_end_token.append(lowerCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(lowerCamelCase ) )
print("""Old:""" , tokenizer.decode(lowerCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=False ):
__magic_name__ : List[Any] =get_strided_contexts_and_ans(
lowerCamelCase , lowerCamelCase , doc_stride=lowerCamelCase , max_length=lowerCamelCase , assertion=lowerCamelCase , )
return example
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
with jsonlines.open(lowerCamelCase , """a""" ) as writer:
for example in tqdm(lowerCamelCase , total=len(lowerCamelCase ) , desc="""Saving samples ... """ ):
__magic_name__ : int =example["""labels"""]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCAmelCase_ : Optional[int] = load_dataset("natural_questions")
UpperCAmelCase_ : Optional[int] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
UpperCAmelCase_ : str = data["train" if PROCESS_TRAIN == "true" else "validation"]
UpperCAmelCase_ : Optional[int] = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
UpperCAmelCase_ : int = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCAmelCase_ : Optional[Any] = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
UpperCAmelCase_ : int = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 21 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Optional[Any] = '''▁'''
_UpperCAmelCase : Tuple = {'''vocab_file''': '''spiece.model'''}
_UpperCAmelCase : Optional[Any] = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
_UpperCAmelCase : Tuple = {
'''google/pegasus-xsum''': 5_12,
}
_UpperCAmelCase : str = logging.get_logger(__name__)
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self : Optional[int], UpperCamelCase__ : str, UpperCamelCase__ : Optional[Any]="<pad>", UpperCamelCase__ : List[Any]="</s>", UpperCamelCase__ : Dict="<unk>", UpperCamelCase__ : List[Any]="<mask_2>", UpperCamelCase__ : Dict="<mask_1>", UpperCamelCase__ : List[str]=None, UpperCamelCase__ : List[str]=1_03, UpperCamelCase__ : Optional[Dict[str, Any]] = None, **UpperCamelCase__ : List[str], ) -> None:
_A = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase__, UpperCamelCase__ ):
raise TypeError(
f'additional_special_tokens should be of type {type(UpperCamelCase__ )}, but is'
f' {type(UpperCamelCase__ )}' )
_A = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(UpperCamelCase__ ), self.offset - 1 )
]
if len(set(UpperCamelCase__ ) ) != len(UpperCamelCase__ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
_A = additional_special_tokens_extended
else:
_A = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset )]
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase__, unk_token=UpperCamelCase__, mask_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token_sent=UpperCamelCase__, offset=UpperCamelCase__, additional_special_tokens=UpperCamelCase__, sp_model_kwargs=self.sp_model_kwargs, **UpperCamelCase__, )
_A = mask_token_sent
_A = vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# add special tokens to encoder dict
_A = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
_A = {v: k for k, v in self.encoder.items()}
@property
def __UpperCAmelCase ( self : str ) -> int:
return len(self.sp_model ) + self.offset
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, int]:
_A = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ) -> Tuple:
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : Any, UpperCamelCase__ : str ) -> List[str]:
_A = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int, UpperCamelCase__ : str ) -> List[str]:
return self.sp_model.encode(UpperCamelCase__, out_type=UpperCamelCase__ )
def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : str ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
_A = self.sp_model.piece_to_id(UpperCamelCase__ )
return sp_id + self.offset
def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : int ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
_A = self.sp_model.IdToPiece(index - self.offset )
return token
def __UpperCAmelCase ( self : str, UpperCamelCase__ : Dict ) -> Optional[int]:
_A = []
_A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
_A = []
else:
current_sub_tokens.append(UpperCamelCase__ )
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : int=False ) -> List[Any]:
return 1
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : Optional[Any] ) -> List[str]:
_A = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : List, UpperCamelCase__ : Optional[List] = None, UpperCamelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase__ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[Any]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : str, UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_A = os.path.join(
UpperCamelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__, 'wb' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
| 107 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """xlm-roberta-xl"""
def __init__( self :Dict , __snake_case :Optional[Any]=25_08_80 , __snake_case :List[Any]=25_60 , __snake_case :Optional[Any]=36 , __snake_case :Any=32 , __snake_case :int=1_02_40 , __snake_case :List[Any]="gelu" , __snake_case :Union[str, Any]=0.1 , __snake_case :Optional[Any]=0.1 , __snake_case :str=5_14 , __snake_case :Union[str, Any]=1 , __snake_case :Optional[int]=0.02 , __snake_case :str=1E-05 , __snake_case :str=1 , __snake_case :int=0 , __snake_case :Tuple=2 , __snake_case :Optional[int]="absolute" , __snake_case :str=True , __snake_case :Any=None , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__magic_name__ : List[str] =vocab_size
__magic_name__ : List[str] =hidden_size
__magic_name__ : Union[str, Any] =num_hidden_layers
__magic_name__ : Any =num_attention_heads
__magic_name__ : Any =hidden_act
__magic_name__ : List[str] =intermediate_size
__magic_name__ : Any =hidden_dropout_prob
__magic_name__ : Union[str, Any] =attention_probs_dropout_prob
__magic_name__ : Any =max_position_embeddings
__magic_name__ : Any =type_vocab_size
__magic_name__ : List[str] =initializer_range
__magic_name__ : Optional[int] =layer_norm_eps
__magic_name__ : Dict =position_embedding_type
__magic_name__ : Any =use_cache
__magic_name__ : Dict =classifier_dropout
class __A ( UpperCamelCase__ ):
@property
def A__ ( self :Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
__magic_name__ : str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__ : Optional[Any] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 21 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Union[str, Any]:
_UpperCAmelCase = SwinConfig(image_size=1_9_2 )
if "base" in model_name:
_UpperCAmelCase = 6
_UpperCAmelCase = 1_2_8
_UpperCAmelCase = (2, 2, 1_8, 2)
_UpperCAmelCase = (4, 8, 1_6, 3_2)
elif "large" in model_name:
_UpperCAmelCase = 1_2
_UpperCAmelCase = 1_9_2
_UpperCAmelCase = (2, 2, 1_8, 2)
_UpperCAmelCase = (6, 1_2, 2_4, 4_8)
else:
raise ValueError("""Model not supported, only supports base and large variants""" )
_UpperCAmelCase = window_size
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
return config
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> List[Any]:
if "encoder.mask_token" in name:
_UpperCAmelCase = name.replace("""encoder.mask_token""" , """embeddings.mask_token""" )
if "encoder.patch_embed.proj" in name:
_UpperCAmelCase = name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "encoder.patch_embed.norm" in name:
_UpperCAmelCase = name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" )
if "attn.proj" in name:
_UpperCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
_UpperCAmelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
_UpperCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_UpperCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_UpperCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_UpperCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
_UpperCAmelCase = """layernorm.weight"""
if name == "encoder.norm.bias":
_UpperCAmelCase = """layernorm.bias"""
if "decoder" in name:
pass
else:
_UpperCAmelCase = """swin.""" + name
return name
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> int:
for key in orig_state_dict.copy().keys():
_UpperCAmelCase = orig_state_dict.pop(__snake_case )
if "attn_mask" in key:
pass
elif "qkv" in key:
_UpperCAmelCase = key.split(""".""" )
_UpperCAmelCase = int(key_split[2] )
_UpperCAmelCase = int(key_split[4] )
_UpperCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCAmelCase = val[:dim, :]
_UpperCAmelCase = val[
dim : dim * 2, :
]
_UpperCAmelCase = val[-dim:, :]
else:
_UpperCAmelCase = val[
:dim
]
_UpperCAmelCase = val[
dim : dim * 2
]
_UpperCAmelCase = val[
-dim:
]
else:
_UpperCAmelCase = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case ) -> int:
_UpperCAmelCase = torch.load(__snake_case , map_location="""cpu""" )["""model"""]
_UpperCAmelCase = get_swin_config(__snake_case )
_UpperCAmelCase = SwinForMaskedImageModeling(__snake_case )
model.eval()
_UpperCAmelCase = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = ViTImageProcessor(size={"""height""": 1_9_2, """width""": 1_9_2} )
_UpperCAmelCase = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
_UpperCAmelCase = image_processor(images=__snake_case , return_tensors="""pt""" )
with torch.no_grad():
_UpperCAmelCase = model(**__snake_case ).logits
print(outputs.keys() )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__snake_case )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
print(f"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(f"""microsoft/{model_name}""" )
image_processor.push_to_hub(f"""microsoft/{model_name}""" )
if __name__ == "__main__":
__a: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__a: int = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 108 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__magic_name__ : Dict =F"{src_lang}-{tgt_lang}"
print(F"Converting {dataset}-{pair}" )
__magic_name__ : Dict =datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
__magic_name__ : Optional[int] =F"{dataset}-{pair}"
__magic_name__ : int =Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(F"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
__magic_name__ : Dict ="""val""" if split == """validation""" else split
__magic_name__ : List[Any] =save_dir.joinpath(F"{fn}.source" )
__magic_name__ : Optional[int] =save_dir.joinpath(F"{fn}.target" )
__magic_name__ : Optional[Any] =src_path.open("""w+""" )
__magic_name__ : List[Any] =tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ : str =x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 21 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __a ( _snake_case, unittest.TestCase ):
# TODO: is there an appropriate internal test set?
__UpperCamelCase : Optional[Any] = 'ssube/stable-diffusion-x4-upscaler-onnx'
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Optional[int]=0 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 128, 128) ,rng=random.Random(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
__SCREAMING_SNAKE_CASE = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
__SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
__SCREAMING_SNAKE_CASE = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
__SCREAMING_SNAKE_CASE = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __a ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ort.SessionOptions()
__SCREAMING_SNAKE_CASE = False
return options
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__SCREAMING_SNAKE_CASE = init_image.resize((128, 128) )
# using the PNDM scheduler by default
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """A fantasy landscape, trending on artstation"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCamelCase ,image=lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=lowerCamelCase ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__SCREAMING_SNAKE_CASE = init_image.resize((128, 128) )
__SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" ,subfolder="""scheduler""" )
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" ,scheduler=lowerCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """A fantasy landscape, trending on artstation"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCamelCase ,image=lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=lowerCamelCase ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 109 |
from __future__ import annotations
from fractions import Fraction
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =[]
__magic_name__ : List[Any] =11
__magic_name__ : Tuple =int("""1""" + """0""" * digit_len )
for num in range(lowerCamelCase , lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowerCamelCase , lowerCamelCase ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
__magic_name__ : List[str] =10
return solutions
def lowerCAmelCase_ ( lowerCamelCase = 2 ):
__magic_name__ : str =1.0
for fraction in fraction_list(lowerCamelCase ):
__magic_name__ : int =Fraction(lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def lowerCamelCase ( ):
UpperCAmelCase__ : dict[int, int] = {}
UpperCAmelCase__ : Any = 2
while True:
UpperCAmelCase__ : Optional[int] = factor_map.pop(_snake_case ,_snake_case )
if factor:
UpperCAmelCase__ : Any = factor + prime
while x in factor_map:
x += factor
UpperCAmelCase__ : int = factor
else:
UpperCAmelCase__ : List[Any] = prime
yield prime
prime += 1
def lowerCamelCase ( _snake_case = 1e1_0 ):
UpperCAmelCase__ : List[Any] = sieve()
UpperCAmelCase__ : Optional[int] = 1
while True:
UpperCAmelCase__ : Optional[Any] = next(_snake_case )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(_snake_case )
n += 2
if __name__ == "__main__":
print(solution())
| 110 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
__magic_name__ : Dict =sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__magic_name__ : Union[str, Any] =mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
__magic_name__ : List[Any] =proportion * 4
print(F"The estimated value of pi is {pi_estimate}" )
print(F"The numpy value of pi is {pi}" )
print(F"The total error is {abs(pi - pi_estimate )}" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ):
def identity_function(lowerCamelCase ) -> float:
return x
__magic_name__ : Optional[int] =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__magic_name__ : str =(max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F"Estimating area under y=x where x varies from {min_value} to {max_value}" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {expected_value}" )
print(F"Total error is {abs(estimated_value - expected_value )}" )
print("""******************""" )
def lowerCAmelCase_ ( lowerCamelCase ):
def function_to_integrate(lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
__magic_name__ : Dict =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {pi}" )
print(F"Total error is {abs(estimated_value - pi )}" )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ : Union[str, Any] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = ["ConditionalDetrFeatureExtractor"]
lowerCAmelCase_ : str = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 673 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __A ( tf.keras.layers.Layer ):
def __init__( self :Optional[int] , __snake_case :Dict[str, int] , __snake_case :List[str] , __snake_case :int = None , __snake_case :int = None ):
'''simple docstring'''
super().__init__()
__magic_name__ : Optional[int] =pad_token_id
__magic_name__ : List[Any] =max_length
__magic_name__ : Dict =vocab
__magic_name__ : int =merges
__magic_name__ : Optional[int] =BytePairTokenizer(__snake_case , __snake_case , sequence_length=__snake_case )
@classmethod
def A__ ( cls :List[Any] , __snake_case :GPTaTokenizer , *__snake_case :int , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : List[Any] =[""" """.join(__snake_case ) for m in tokenizer.bpe_ranks.keys()]
__magic_name__ : str =tokenizer.get_vocab()
return cls(__snake_case , __snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Dict , __snake_case :Union[str, os.PathLike] , *__snake_case :Union[str, Any] , **__snake_case :int ):
'''simple docstring'''
__magic_name__ : Dict =GPTaTokenizer.from_pretrained(__snake_case , *__snake_case , **__snake_case )
return cls.from_tokenizer(__snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Optional[Any] , __snake_case :List[Any] ):
'''simple docstring'''
return cls(**__snake_case )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def A__ ( self :List[Any] , __snake_case :Dict , __snake_case :int = None ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.tf_tokenizer(__snake_case )
__magic_name__ : Tuple =tf.ones_like(__snake_case )
if self.pad_token_id is not None:
# pad the tokens up to max length
__magic_name__ : Tuple =max_length if max_length is not None else self.max_length
if max_length is not None:
__magic_name__ , __magic_name__ : Tuple =pad_model_inputs(
__snake_case , max_seq_length=__snake_case , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 21 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class a__ ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = RoFormerTokenizer
__lowerCamelCase = RoFormerTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = True
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCamelCase ( self , **lowercase ) -> Any:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__snake_case )
def UpperCamelCase ( self , **lowercase ) -> Tuple:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__snake_case )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = """永和服装饰品有限公司,今天天气非常好"""
A__ = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.get_tokenizer()
A__ = self.get_chinese_input_output_texts()
A__ = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , output_text.split() )
A__ = tokens + [tokenizer.unk_token]
A__ = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = self.get_rust_tokenizer()
A__ = self.get_chinese_input_output_texts()
A__ = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , output_text.split() )
A__ = tokens + [tokenizer.unk_token]
A__ = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
pass
| 514 |
import math
import tensorflow as tf
from packaging import version
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : List[str] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Optional[Any] =tf.cast(math.pi , x.dtype )
__magic_name__ : int =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase , 3 )) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Any =tf.convert_to_tensor(lowerCamelCase )
return x * tf.tanh(tf.math.softplus(lowerCamelCase ) )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[Any] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Union[str, Any] =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Dict =tf.cast(1.7_0_2 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.clip_by_value(_gelu(lowerCamelCase ) , -10 , 10 )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=-1 ):
__magic_name__ , __magic_name__ : List[Any] =tf.split(lowerCamelCase , 2 , axis=lowerCamelCase )
return a * tf.math.sigmoid(lowerCamelCase )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.keras.activations.gelu(lowerCamelCase , approximate=lowerCamelCase )
UpperCAmelCase_ : List[str] = tf.keras.activations.gelu
UpperCAmelCase_ : Dict = approximate_gelu_wrap
else:
UpperCAmelCase_ : Dict = _gelu
UpperCAmelCase_ : str = _gelu_new
UpperCAmelCase_ : Any = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowerCAmelCase_ ( lowerCamelCase ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 21 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCAmelCase = 2_5_0_0_0_4
UpperCAmelCase = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class snake_case__ ( UpperCamelCase__ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Tuple = MBartTokenizer
_SCREAMING_SNAKE_CASE : List[Any] = MBartTokenizerFast
_SCREAMING_SNAKE_CASE : Optional[int] = True
_SCREAMING_SNAKE_CASE : List[str] = True
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : List[str] = MBartTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = MBartTokenizer(__snake_case , keep_accents=__snake_case )
snake_case_ : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(__snake_case , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
snake_case_ : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case_ : Any = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def UpperCAmelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
snake_case_ : List[str] = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : Dict = tokenizer_r.save_pretrained(__snake_case )
snake_case_ : Dict = tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
snake_case_ : int = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
snake_case_ : Any = tokenizer_r.from_pretrained(__snake_case )
snake_case_ : List[Any] = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
snake_case_ : List[str] = tempfile.mkdtemp()
snake_case_ : Optional[int] = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
snake_case_ : Dict = tokenizer_p.save_pretrained(__snake_case )
# Checks it save with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
snake_case_ : Any = tokenizer_r.from_pretrained(__snake_case )
snake_case_ : int = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
snake_case_ : List[Any] = tempfile.mkdtemp()
snake_case_ : Dict = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
snake_case_ : List[str] = tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ : str = tokenizer_r.from_pretrained(__snake_case )
snake_case_ : Optional[int] = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = "facebook/mbart-large-en-ro"
_SCREAMING_SNAKE_CASE : Any = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
_SCREAMING_SNAKE_CASE : Any = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
_SCREAMING_SNAKE_CASE : Dict = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def UpperCAmelCase__ ( cls : str ) -> int:
'''simple docstring'''
snake_case_ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
snake_case_ : Any = 1
return cls
def UpperCAmelCase__ ( self : Any ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_00_20 )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
snake_case_ : Union[str, Any] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
snake_case_ : Optional[int] = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
snake_case_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __snake_case )
snake_case_ : Dict = 10
snake_case_ : Optional[Any] = self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __snake_case )
self.assertEqual(len(__snake_case ) , __snake_case )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_00_26, 25_00_01] )
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = tempfile.mkdtemp()
snake_case_ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
snake_case_ : Dict = MBartTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__snake_case , return_tensors="pt" )
snake_case_ : str = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
snake_case_ : Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
snake_case_ : int = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors="pt" )
snake_case_ : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors="pt" )
snake_case_ : List[Any] = targets["""input_ids"""]
snake_case_ : List[str] = shift_tokens_right(__snake_case , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self : str ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(__snake_case ) , {
# A, test, EOS, en_XX
"input_ids": [[62, 30_34, 2, 25_00_04]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_00_01,
} , )
| 666 |
from collections.abc import Sequence
def lowerCAmelCase_ ( lowerCamelCase = None ):
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
__magic_name__ : str =nums[0]
for i in range(1 , len(lowerCamelCase ) ):
__magic_name__ : Any =nums[i]
__magic_name__ : Dict =max(lowerCamelCase , ans + num , lowerCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ : List[str] = int(input("Enter number of elements : ").strip())
UpperCAmelCase_ : Tuple = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 21 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ):
SCREAMING_SNAKE_CASE = np.array([[1, item, train_mtch[i]] for i, item in enumerate(UpperCAmelCase__ )] )
SCREAMING_SNAKE_CASE = np.array(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , UpperCAmelCase__ ) ) , x.transpose() ) , UpperCAmelCase__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def __lowerCamelCase (UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any ):
SCREAMING_SNAKE_CASE = (1, 2, 1)
SCREAMING_SNAKE_CASE = (1, 1, 0, 7)
SCREAMING_SNAKE_CASE = SARIMAX(
UpperCAmelCase__ , exog=UpperCAmelCase__ , order=UpperCAmelCase__ , seasonal_order=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = model.fit(disp=UpperCAmelCase__ , maxiter=6_0_0 , method="nm" )
SCREAMING_SNAKE_CASE = model_fit.predict(1 , len(UpperCAmelCase__ ) , exog=[test_match] )
return result[0]
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any ):
SCREAMING_SNAKE_CASE = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = regressor.predict(UpperCAmelCase__ )
return y_pred[0]
def __lowerCamelCase (UpperCAmelCase__ : Tuple ):
train_user.sort()
SCREAMING_SNAKE_CASE = np.percentile(UpperCAmelCase__ , 2_5 )
SCREAMING_SNAKE_CASE = np.percentile(UpperCAmelCase__ , 7_5 )
SCREAMING_SNAKE_CASE = qa - qa
SCREAMING_SNAKE_CASE = qa - (iqr * 0.1)
return low_lim
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
for i in list_vote:
if i > actual_result:
SCREAMING_SNAKE_CASE = not_safe + 1
else:
if abs(abs(UpperCAmelCase__ ) - abs(UpperCAmelCase__ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_lowerCamelCase : Tuple = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
_lowerCamelCase : str = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
_lowerCamelCase : Tuple = Normalizer().fit_transform(data_input_df.values)
# split data
_lowerCamelCase : Dict = normalize_df[:, 2].tolist()
_lowerCamelCase : List[str] = normalize_df[:, 0].tolist()
_lowerCamelCase : List[Any] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_lowerCamelCase : List[Any] = normalize_df[:, [1, 2]].tolist()
_lowerCamelCase : List[Any] = x[: len(x) - 1]
_lowerCamelCase : Tuple = x[len(x) - 1 :]
# for linear regression & sarimax
_lowerCamelCase : Dict = total_date[: len(total_date) - 1]
_lowerCamelCase : Optional[Any] = total_user[: len(total_user) - 1]
_lowerCamelCase : int = total_match[: len(total_match) - 1]
_lowerCamelCase : int = total_date[len(total_date) - 1 :]
_lowerCamelCase : Dict = total_user[len(total_user) - 1 :]
_lowerCamelCase : Optional[int] = total_match[len(total_match) - 1 :]
# voting system with forecasting
_lowerCamelCase : Union[str, Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
_lowerCamelCase : Optional[Any] = "" if data_safety_checker(res_vote, tst_user) else "not "
print('''Today\'s data is {not_str}safe.''')
| 403 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__( self :Union[str, Any] ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __A :
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =sorted(set(self.languages ) ) if self.languages else None
__magic_name__ : Optional[int] =len(self.languages ) if self.languages else None
def __call__( self :List[str] ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =set(self.languages )
if self.languages and set(__snake_case ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__magic_name__ : Any =[]
for lang, text in translation_dict.items():
if isinstance(__snake_case , __snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__magic_name__ , __magic_name__ : List[str] =zip(*sorted(__snake_case ) )
return {"language": languages, "translation": translations}
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 21 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __lowerCamelCase ( UpperCamelCase__ ):
a__: Any = 'ibert'
def __init__( self , UpperCAmelCase=3_0522 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=2 , UpperCAmelCase=0.0_2 , UpperCAmelCase=1e-1_2 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase="absolute" , UpperCAmelCase=False , UpperCAmelCase="none" , **UpperCAmelCase , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = quant_mode
lowerCamelCase_ = force_dequant
class __lowerCamelCase ( UpperCamelCase__ ):
@property
def UpperCAmelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 29 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase_ : Dict = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase_ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase_ : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def A__ ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def A__ ( self :Tuple , __snake_case :str , __snake_case :Tuple , __snake_case :List[str]=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__snake_case , __snake_case , sample_weight=__snake_case ) ),
}
| 21 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def UpperCAmelCase ( a__ , a__ , a__ , a__ , a__=True , a__="pt" ):
'''simple docstring'''
lowerCAmelCase :int = {"""add_prefix_space""": True} if isinstance(a__ , a__ ) and not line.startswith(' ' ) else {}
lowerCAmelCase :Dict = padding_side
return tokenizer(
[line] , max_length=a__ , padding='max_length' if pad_to_max_length else None , truncation=a__ , return_tensors=a__ , add_special_tokens=a__ , **a__ , )
def UpperCAmelCase ( a__ , a__ , a__=None , ):
'''simple docstring'''
lowerCAmelCase :Union[str, Any] = input_ids.ne(a__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __UpperCamelCase ( UpperCamelCase__ ):
def __init__( self : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict="train" , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]="" , ) -> Tuple:
super().__init__()
lowerCAmelCase :List[str] = Path(__snake_case ).joinpath(type_path + '.source' )
lowerCAmelCase :Tuple = Path(__snake_case ).joinpath(type_path + '.target' )
lowerCAmelCase :int = self.get_char_lens(self.src_file )
lowerCAmelCase :Optional[Any] = max_source_length
lowerCAmelCase :int = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
lowerCAmelCase :Any = tokenizer
lowerCAmelCase :str = prefix
if n_obs is not None:
lowerCAmelCase :Union[str, Any] = self.src_lens[:n_obs]
lowerCAmelCase :Dict = src_lang
lowerCAmelCase :List[str] = tgt_lang
def __len__( self : Tuple ) -> Union[str, Any]:
return len(self.src_lens )
def __getitem__( self : int , UpperCAmelCase : List[Any] ) -> List[str]:
lowerCAmelCase :str = index + 1 # linecache starts at 1
lowerCAmelCase :List[Any] = self.prefix + linecache.getline(str(self.src_file ) , __snake_case ).rstrip('\n' )
lowerCAmelCase :Tuple = linecache.getline(str(self.tgt_file ) , __snake_case ).rstrip('\n' )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , __snake_case ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowerCAmelCase :Dict = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , __snake_case ) else self.tokenizer
)
lowerCAmelCase :Optional[Any] = self.tokenizer.generator if isinstance(self.tokenizer , __snake_case ) else self.tokenizer
lowerCAmelCase :Union[str, Any] = encode_line(__snake_case , __snake_case , self.max_source_length , 'right' )
lowerCAmelCase :str = encode_line(__snake_case , __snake_case , self.max_target_length , 'right' )
lowerCAmelCase :Tuple = source_inputs["""input_ids"""].squeeze()
lowerCAmelCase :Any = target_inputs["""input_ids"""].squeeze()
lowerCAmelCase :Any = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase : List[str] ) -> Dict:
return [len(__snake_case ) for x in Path(__snake_case ).open().readlines()]
def UpperCAmelCase__ ( self : Dict , UpperCAmelCase : List[str] ) -> Optional[Any]:
lowerCAmelCase :Any = torch.stack([x['input_ids'] for x in batch] )
lowerCAmelCase :Any = torch.stack([x['attention_mask'] for x in batch] )
lowerCAmelCase :Optional[Any] = torch.stack([x['decoder_input_ids'] for x in batch] )
lowerCAmelCase :Dict = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , __snake_case )
else self.tokenizer.pad_token_id
)
lowerCAmelCase :Any = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , __snake_case )
else self.tokenizer.pad_token_id
)
lowerCAmelCase :Tuple = trim_batch(__snake_case , __snake_case )
lowerCAmelCase :Dict = trim_batch(__snake_case , __snake_case , attention_mask=__snake_case )
lowerCAmelCase :Union[str, Any] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__SCREAMING_SNAKE_CASE = getLogger(__name__)
def UpperCAmelCase ( a__ ):
'''simple docstring'''
return list(itertools.chain.from_iterable(a__ ) )
def UpperCAmelCase ( a__ ):
'''simple docstring'''
lowerCAmelCase :str = get_git_info()
save_json(a__ , os.path.join(a__ , 'git_log.json' ) )
def UpperCAmelCase ( a__ , a__ , a__=4 , **a__ ):
'''simple docstring'''
with open(a__ , 'w' ) as f:
json.dump(a__ , a__ , indent=a__ , **a__ )
def UpperCAmelCase ( a__ ):
'''simple docstring'''
with open(a__ ) as f:
return json.load(a__ )
def UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase :str = git.Repo(search_parent_directories=a__ )
lowerCAmelCase :Optional[Any] = {
"""repo_id""": str(a__ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
return list(map(a__ , a__ ) )
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
with open(a__ , 'wb' ) as f:
return pickle.dump(a__ , a__ )
def UpperCAmelCase ( a__ ):
'''simple docstring'''
def remove_articles(a__ ):
return re.sub(R'\b(a|an|the)\b' , ' ' , a__ )
def white_space_fix(a__ ):
return " ".join(text.split() )
def remove_punc(a__ ):
lowerCAmelCase :Optional[int] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(a__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(a__ ) ) ) )
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
lowerCAmelCase :str = normalize_answer(a__ ).split()
lowerCAmelCase :Tuple = normalize_answer(a__ ).split()
lowerCAmelCase :Any = Counter(a__ ) & Counter(a__ )
lowerCAmelCase :Any = sum(common.values() )
if num_same == 0:
return 0
lowerCAmelCase :List[Any] = 1.0 * num_same / len(a__ )
lowerCAmelCase :Union[str, Any] = 1.0 * num_same / len(a__ )
lowerCAmelCase :List[str] = (2 * precision * recall) / (precision + recall)
return fa
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
return normalize_answer(a__ ) == normalize_answer(a__ )
def UpperCAmelCase ( a__ , a__ ):
'''simple docstring'''
assert len(a__ ) == len(a__ )
lowerCAmelCase :int = 0
for hypo, pred in zip(a__ , a__ ):
em += exact_match_score(a__ , a__ )
if len(a__ ) > 0:
em /= len(a__ )
return {"em": em}
def UpperCAmelCase ( a__ ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def UpperCAmelCase ( a__ , a__ , a__ ):
'''simple docstring'''
lowerCAmelCase :Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
lowerCAmelCase :Any = """dropout_rate"""
for p in extra_params:
if getattr(a__ , a__ , a__ ):
if not hasattr(a__ , a__ ) and not hasattr(a__ , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(a__ ) )
delattr(a__ , a__ )
continue
lowerCAmelCase :str = p if hasattr(a__ , a__ ) else equivalent_param[p]
setattr(a__ , a__ , getattr(a__ , a__ ) )
delattr(a__ , a__ )
return hparams, config | 553 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =OmegaConf.load(lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(lowerCamelCase ) ) )
return config
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
if conf_path is None:
__magic_name__ : List[str] ="""./model_checkpoints/vqgan_only.yaml"""
__magic_name__ : Dict =load_config(lowerCamelCase , display=lowerCamelCase )
__magic_name__ : Tuple =VQModel(**config.model.params )
if ckpt_path is None:
__magic_name__ : Optional[Any] ="""./model_checkpoints/vqgan_only.pt"""
__magic_name__ : Tuple =torch.load(lowerCamelCase , map_location=lowerCamelCase )
if ".ckpt" in ckpt_path:
__magic_name__ : Any =sd["""state_dict"""]
model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
model.to(lowerCamelCase )
del sd
return model
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] =model.encode(lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
__magic_name__ : List[Any] =model.decode(lowerCamelCase )
return xrec
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ , __magic_name__ : Optional[int] =string.rsplit(""".""" , 1 )
if reload:
__magic_name__ : Optional[int] =importlib.import_module(lowerCamelCase )
importlib.reload(lowerCamelCase )
return getattr(importlib.import_module(lowerCamelCase , package=lowerCamelCase ) , cls )
def lowerCAmelCase_ ( lowerCamelCase ):
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=True ):
__magic_name__ : str =instantiate_from_config(lowerCamelCase )
if sd is not None:
model.load_state_dict(lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# load the specified checkpoint
if ckpt:
__magic_name__ : str =torch.load(lowerCamelCase , map_location="""cpu""" )
__magic_name__ : Any =pl_sd["""global_step"""]
print(F"loaded model from global step {global_step}." )
else:
__magic_name__ : List[Any] ={"""state_dict""": None}
__magic_name__ : Optional[Any] =None
__magic_name__ : Tuple =load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=lowerCamelCase , eval_mode=lowerCamelCase )["""model"""]
return model, global_step
| 21 | 0 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=False ):
"""simple docstring"""
snake_case_ : Optional[int] = OmegaConf.load(SCREAMING_SNAKE_CASE__ )
if display:
print(yaml.dump(OmegaConf.to_container(SCREAMING_SNAKE_CASE__ ) ) )
return config
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Dict=None ):
"""simple docstring"""
if conf_path is None:
snake_case_ : List[str] = """./model_checkpoints/vqgan_only.yaml"""
snake_case_ : Dict = load_config(SCREAMING_SNAKE_CASE__ , display=SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = VQModel(**config.model.params )
if ckpt_path is None:
snake_case_ : Optional[Any] = """./model_checkpoints/vqgan_only.pt"""
snake_case_ : Tuple = torch.load(SCREAMING_SNAKE_CASE__ , map_location=SCREAMING_SNAKE_CASE__ )
if ".ckpt" in ckpt_path:
snake_case_ : Any = sd["""state_dict"""]
model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
del sd
return model
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = model.encode(SCREAMING_SNAKE_CASE__ )
print(f'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
snake_case_ : List[Any] = model.decode(SCREAMING_SNAKE_CASE__ )
return xrec
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=False ):
"""simple docstring"""
snake_case_ : Optional[int] = string.rsplit(""".""" , 1 )
if reload:
snake_case_ : Optional[int] = importlib.import_module(SCREAMING_SNAKE_CASE__ )
importlib.reload(SCREAMING_SNAKE_CASE__ )
return getattr(importlib.import_module(SCREAMING_SNAKE_CASE__ , package=SCREAMING_SNAKE_CASE__ ) , cls )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
"""simple docstring"""
snake_case_ : str = instantiate_from_config(SCREAMING_SNAKE_CASE__ )
if sd is not None:
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
"""simple docstring"""
if ckpt:
snake_case_ : str = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
snake_case_ : Any = pl_sd["""global_step"""]
print(f'loaded model from global step {global_step}.' )
else:
snake_case_ : List[Any] = {"""state_dict""": None}
snake_case_ : Optional[Any] = None
snake_case_ : Tuple = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=SCREAMING_SNAKE_CASE__ , eval_mode=SCREAMING_SNAKE_CASE__ )["""model"""]
return model, global_step
| 480 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
def A__ ( self :Tuple ):
'''simple docstring'''
debug_launcher(test_script.main )
def A__ ( self :Dict ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 21 | 0 |
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Optional[Any]:
return "".join([hex(__snake_case )[2:].zfill(2 ).upper() for byte in list(__snake_case )] )
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Dict:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(__snake_case ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(__snake_case ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(__snake_case ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 108 |
UpperCAmelCase_ : Tuple = 0 # The first color of the flag.
UpperCAmelCase_ : Any = 1 # The second color of the flag.
UpperCAmelCase_ : str = 2 # The third color of the flag.
UpperCAmelCase_ : Tuple = (red, white, blue)
def lowerCAmelCase_ ( lowerCamelCase ):
if not sequence:
return []
if len(lowerCamelCase ) == 1:
return list(lowerCamelCase )
__magic_name__ : int =0
__magic_name__ : str =len(lowerCamelCase ) - 1
__magic_name__ : Optional[Any] =0
while mid <= high:
if sequence[mid] == colors[0]:
__magic_name__ , __magic_name__ : Tuple =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
__magic_name__ , __magic_name__ : Optional[Any] =sequence[high], sequence[mid]
high -= 1
else:
__magic_name__ : Optional[int] =F"The elements inside the sequence must contains only {colors} values"
raise ValueError(lowerCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[Any] = input("Enter numbers separated by commas:\n").strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 21 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def snake_case_ (__A : Union[str, Any] ) -> int:
__lowerCAmelCase : Tuple = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__lowerCAmelCase : Any = [1_4_4, 1_9_2, 2_4_0]
__lowerCAmelCase : Union[str, Any] = [1_6, 3_2, 6_4, 9_6, 1_2_8, 1_6_0, 6_4_0]
elif "mobilevit_xs" in mobilevit_name:
__lowerCAmelCase : Dict = [9_6, 1_2_0, 1_4_4]
__lowerCAmelCase : str = [1_6, 3_2, 4_8, 6_4, 8_0, 9_6, 3_8_4]
elif "mobilevit_xxs" in mobilevit_name:
__lowerCAmelCase : List[str] = [6_4, 8_0, 9_6]
__lowerCAmelCase : List[str] = [1_6, 1_6, 2_4, 4_8, 6_4, 8_0, 3_2_0]
__lowerCAmelCase : Optional[int] = 0.05
__lowerCAmelCase : int = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
__lowerCAmelCase : int = 5_1_2
__lowerCAmelCase : Dict = 1_6
__lowerCAmelCase : Union[str, Any] = 2_1
__lowerCAmelCase : int = """pascal-voc-id2label.json"""
else:
__lowerCAmelCase : Union[str, Any] = 1_0_0_0
__lowerCAmelCase : List[Any] = """imagenet-1k-id2label.json"""
__lowerCAmelCase : List[Any] = """huggingface/label-files"""
__lowerCAmelCase : Dict = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase : Any = {int(__A ): v for k, v in idalabel.items()}
__lowerCAmelCase : int = idalabel
__lowerCAmelCase : Any = {v: k for k, v in idalabel.items()}
return config
def snake_case_ (__A : Union[str, Any] , __A : str=False ) -> Optional[Any]:
for i in range(1 , 6 ):
if f'''layer_{i}.''' in name:
__lowerCAmelCase : Dict = name.replace(f'''layer_{i}.''' , f'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
__lowerCAmelCase : List[Any] = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
__lowerCAmelCase : int = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
__lowerCAmelCase : str = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
__lowerCAmelCase : Dict = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
__lowerCAmelCase : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
__lowerCAmelCase : Optional[Any] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
__lowerCAmelCase : Tuple = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
__lowerCAmelCase : Any = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
__lowerCAmelCase : Optional[Any] = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if f'''.{i}.{j}.''' in name:
__lowerCAmelCase : int = name.replace(f'''.{i}.{j}.''' , f'''.{i}.layer.{j}.''' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if f'''.{i}.{j}.''' in name:
__lowerCAmelCase : Dict = name.replace(f'''.{i}.{j}.''' , f'''.{i}.''' )
if "expand_1x1" in name:
__lowerCAmelCase : Optional[int] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
__lowerCAmelCase : Optional[int] = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
__lowerCAmelCase : Any = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if f'''.global_rep.{i}.weight''' in name:
__lowerCAmelCase : Dict = name.replace(f'''.global_rep.{i}.weight''' , """.layernorm.weight""" )
if f'''.global_rep.{i}.bias''' in name:
__lowerCAmelCase : Tuple = name.replace(f'''.global_rep.{i}.bias''' , """.layernorm.bias""" )
if ".global_rep." in name:
__lowerCAmelCase : List[str] = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
__lowerCAmelCase : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
__lowerCAmelCase : Optional[Any] = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
__lowerCAmelCase : List[str] = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
__lowerCAmelCase : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
__lowerCAmelCase : Optional[int] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
__lowerCAmelCase : int = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
__lowerCAmelCase : List[Any] = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
__lowerCAmelCase : List[Any] = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
__lowerCAmelCase : Tuple = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
__lowerCAmelCase : Dict = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
__lowerCAmelCase : Tuple = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
__lowerCAmelCase : Any = """mobilevit.""" + name
return name
def snake_case_ (__A : Union[str, Any] , __A : int , __A : int=False ) -> str:
if base_model:
__lowerCAmelCase : Any = """"""
else:
__lowerCAmelCase : Tuple = """mobilevit."""
for key in orig_state_dict.copy().keys():
__lowerCAmelCase : Dict = orig_state_dict.pop(__A )
if key[:8] == "encoder.":
__lowerCAmelCase : Optional[int] = key[8:]
if "qkv" in key:
__lowerCAmelCase : Tuple = key.split(""".""" )
__lowerCAmelCase : Dict = int(key_split[0][6:] ) - 1
__lowerCAmelCase : Dict = int(key_split[3] )
__lowerCAmelCase : Optional[int] = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' )
__lowerCAmelCase : List[Any] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__lowerCAmelCase : Tuple = (
f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
__lowerCAmelCase : Optional[int] = val[:dim, :]
__lowerCAmelCase : Tuple = val[dim : dim * 2, :]
__lowerCAmelCase : List[str] = val[-dim:, :]
else:
__lowerCAmelCase : Tuple = val[:dim]
__lowerCAmelCase : str = val[dim : dim * 2]
__lowerCAmelCase : int = val[-dim:]
else:
__lowerCAmelCase : Optional[int] = val
return orig_state_dict
def snake_case_ () -> Union[str, Any]:
__lowerCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase : str = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def snake_case_ (__A : Dict , __A : Optional[Any] , __A : Tuple , __A : int=False ) -> Dict:
__lowerCAmelCase : Optional[int] = get_mobilevit_config(__A )
# load original state_dict
__lowerCAmelCase : List[Any] = torch.load(__A , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
__lowerCAmelCase : Union[str, Any] = MobileViTForSemanticSegmentation(__A ).eval()
else:
__lowerCAmelCase : Tuple = MobileViTForImageClassification(__A ).eval()
__lowerCAmelCase : Dict = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# Check outputs on an image, prepared by MobileViTImageProcessor
__lowerCAmelCase : Any = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
__lowerCAmelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="""pt""" )
__lowerCAmelCase : List[Any] = model(**__A )
__lowerCAmelCase : Union[str, Any] = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 2_1, 3_2, 3_2)
if mobilevit_name == "deeplabv3_mobilevit_s":
__lowerCAmelCase : Any = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__lowerCAmelCase : Dict = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__lowerCAmelCase : Dict = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8624, -9.5964], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4 )
else:
assert logits.shape == (1, 1_0_0_0)
if mobilevit_name == "mobilevit_s":
__lowerCAmelCase : List[str] = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__lowerCAmelCase : Dict = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__lowerCAmelCase : Optional[Any] = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] , __A , atol=1e-4 )
Path(__A ).mkdir(exist_ok=__A )
print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__A )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__A )
if push_to_hub:
__lowerCAmelCase : List[Any] = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
__lowerCAmelCase : str = model_mapping[mobilevit_name]
image_processor.push_to_hub(__A , organization="""apple""" )
model.push_to_hub(__A , organization="""apple""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 651 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = 1
@register_to_config
def __init__( self :Any , __snake_case :Tuple=20_00 , __snake_case :Optional[Any]=0.1 , __snake_case :Any=20 , __snake_case :Optional[int]=1E-3 ):
'''simple docstring'''
__magic_name__ : Dict =None
__magic_name__ : List[str] =None
__magic_name__ : str =None
def A__ ( self :Dict , __snake_case :Optional[int] , __snake_case :Union[str, torch.device] = None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =torch.linspace(1 , self.config.sampling_eps , __snake_case , device=__snake_case )
def A__ ( self :List[str] , __snake_case :List[str] , __snake_case :int , __snake_case :int , __snake_case :List[str]=None ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__magic_name__ : int =(
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__magic_name__ : Optional[int] =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__magic_name__ : str =std.flatten()
while len(std.shape ) < len(score.shape ):
__magic_name__ : List[str] =std.unsqueeze(-1 )
__magic_name__ : Union[str, Any] =-score / std
# compute
__magic_name__ : Tuple =-1.0 / len(self.timesteps )
__magic_name__ : int =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__magic_name__ : Dict =beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__magic_name__ : Any =beta_t.unsqueeze(-1 )
__magic_name__ : Dict =-0.5 * beta_t * x
__magic_name__ : Optional[int] =torch.sqrt(__snake_case )
__magic_name__ : int =drift - diffusion**2 * score
__magic_name__ : List[str] =x + drift * dt
# add noise
__magic_name__ : Optional[int] =randn_tensor(x.shape , layout=x.layout , generator=__snake_case , device=x.device , dtype=x.dtype )
__magic_name__ : Optional[Any] =x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : Optional[int] , snake_case : Any , snake_case : List[Any] )-> Optional[Any]:
_lowerCamelCase = []
_lowerCamelCase = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
_lowerCamelCase = result + left + right
return input_list
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[int] )-> Dict:
if len(snake_case ) <= 1:
return input_list
_lowerCamelCase = list(snake_case )
# iteration for two-way merging
_lowerCamelCase = 2
while p <= len(snake_case ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(snake_case ) , snake_case ):
_lowerCamelCase = i
_lowerCamelCase = i + p - 1
_lowerCamelCase = (low + high + 1) // 2
_lowerCamelCase = merge(snake_case , snake_case , snake_case , snake_case )
# final merge of last two parts
if p * 2 >= len(snake_case ):
_lowerCamelCase = i
_lowerCamelCase = merge(snake_case , 0 , snake_case , len(snake_case ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
A_ : Dict =input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
A_ : Optional[Any] =[]
else:
A_ : Optional[int] =[int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 650 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__( self :List[str] , __snake_case :int , __snake_case :int , __snake_case :float , **__snake_case :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =feature_size
__magic_name__ : Union[str, Any] =sampling_rate
__magic_name__ : List[Any] =padding_value
__magic_name__ : List[str] =kwargs.pop("""padding_side""" , """right""" )
__magic_name__ : Tuple =kwargs.pop("""return_attention_mask""" , __snake_case )
super().__init__(**__snake_case )
def A__ ( self :Any , __snake_case :Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case :Union[bool, str, PaddingStrategy] = True , __snake_case :Optional[int] = None , __snake_case :bool = False , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , __snake_case :Optional[Union[str, TensorType]] = None , ):
'''simple docstring'''
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__magic_name__ : Union[str, Any] ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
__magic_name__ : int =processed_features[self.model_input_names[0]]
__magic_name__ : Union[str, Any] =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
__magic_name__ : List[str] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__magic_name__ : Optional[int] =required_input[0]
if isinstance(__snake_case , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__magic_name__ : Optional[Any] =0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
__magic_name__ : List[str] =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
__magic_name__ : int ="""tf"""
elif is_torch_tensor(__snake_case ):
__magic_name__ : str ="""pt"""
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
__magic_name__ : List[Any] ="""np"""
else:
raise ValueError(
f"type of {first_element} unknown: {type(__snake_case )}. "
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__magic_name__ : List[str] =to_numpy(__snake_case )
else:
__magic_name__ : str =[to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
__magic_name__ : Dict =self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
__magic_name__ : Optional[Any] =processed_features[self.model_input_names[0]]
__magic_name__ : Dict =len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__magic_name__ : Optional[int] =[]
for i in range(__snake_case ):
__magic_name__ : Any ={k: v[i] for k, v in processed_features.items()}
# truncation
__magic_name__ : List[str] =self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__magic_name__ : Optional[int] =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__magic_name__ : Tuple =PaddingStrategy.MAX_LENGTH
__magic_name__ : str ={}
for i in range(__snake_case ):
# padding
__magic_name__ : List[str] =self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
__magic_name__ : Dict =[]
if value.dtype is np.dtype(np.floataa ):
__magic_name__ : Optional[int] =value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def A__ ( self :Any , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
__magic_name__ : Dict =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__magic_name__ : Any =len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : Dict =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : List[Any] =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__magic_name__ : int =np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
__magic_name__ : List[Any] =max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__magic_name__ : Tuple =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__magic_name__ : str =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__magic_name__ : str =np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__magic_name__ : Optional[int] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__magic_name__ : List[Any] =np.pad(
__snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def A__ ( self :Optional[Any] , __snake_case :Union[Dict[str, np.ndarray], BatchFeature] , __snake_case :Optional[int] = None , __snake_case :Optional[int] = None , __snake_case :Optional[bool] = None , ):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__magic_name__ : Union[str, Any] =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__magic_name__ : List[str] =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__magic_name__ : Any =len(__snake_case ) > max_length
if needs_to_be_truncated:
__magic_name__ : List[Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__magic_name__ : List[str] =processed_features["""attention_mask"""][:max_length]
return processed_features
def A__ ( self :List[Any] , __snake_case :str=False , __snake_case :Optional[int]=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
__magic_name__ : Union[str, Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
__magic_name__ : Optional[int] =PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
__magic_name__ : Any =padding
else:
__magic_name__ : Any =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 21 | 0 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def __magic_name__( self :Union[str, Any] ) -> int:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def __magic_name__( self :Dict ) -> int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_uncond_unet
__SCREAMING_SNAKE_CASE : Tuple = KarrasVeScheduler()
__SCREAMING_SNAKE_CASE : List[str] = KarrasVePipeline(unet=__snake_case , scheduler=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = pipe(num_inference_steps=2 , generator=__snake_case , output_type='''numpy''' ).images
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(num_inference_steps=2 , generator=__snake_case , output_type='''numpy''' , return_dict=__snake_case )[0]
__SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = """google/ncsnpp-celebahq-256"""
__SCREAMING_SNAKE_CASE : str = UNetaDModel.from_pretrained(__snake_case )
__SCREAMING_SNAKE_CASE : int = KarrasVeScheduler()
__SCREAMING_SNAKE_CASE : List[Any] = KarrasVePipeline(unet=__snake_case , scheduler=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = pipe(num_inference_steps=20 , generator=__snake_case , output_type='''numpy''' ).images
__SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 696 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __A ( nn.Module ):
def __init__( self :List[Any] ):
'''simple docstring'''
super().__init__()
__magic_name__ : Tuple =nn.Linear(3 , 4 )
__magic_name__ : Union[str, Any] =nn.BatchNormad(4 )
__magic_name__ : List[str] =nn.Linear(4 , 5 )
def A__ ( self :Dict , __snake_case :Tuple ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__snake_case ) ) )
class __A ( UpperCamelCase__ ):
def A__ ( self :Any , __snake_case :Optional[Any] , *__snake_case :List[Any] , **__snake_case :Any ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class __A ( UpperCamelCase__ ):
def A__ ( self :List[str] , __snake_case :Tuple , __snake_case :Union[str, Any] ):
'''simple docstring'''
return output + 1
class __A ( unittest.TestCase ):
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : Tuple =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
self.assertEqual(test_model._hf_hook , __snake_case )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
__magic_name__ : List[str] =ModelHook()
add_hook_to_module(__snake_case , __snake_case )
add_hook_to_module(__snake_case , __snake_case , append=__snake_case )
self.assertEqual(isinstance(test_model._hf_hook , __snake_case ) , __snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__snake_case )
self.assertFalse(hasattr(__snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(__snake_case , """_old_forward""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
__magic_name__ : Any =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(x + 1 )
__magic_name__ : Optional[Any] =test_model(x + 2 )
__magic_name__ : int =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : int =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : str =PreForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : List[str] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Optional[Any] =SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
assert torch.allclose(__snake_case , __snake_case , atol=1E-5 )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
__magic_name__ : Dict =torch.randn(2 , 3 )
__magic_name__ : Any =test_model(__snake_case )
__magic_name__ : Dict =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Any =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Optional[int] =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__magic_name__ : Union[str, Any] =SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
assert torch.allclose(__snake_case , output + 2 , atol=1E-5 )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Tuple =ModelForTest()
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =test_model(__snake_case )
__magic_name__ : Union[str, Any] =PostForwardHook()
add_hook_to_module(__snake_case , __snake_case )
__magic_name__ : Dict =test_model(__snake_case )
self.assertTrue(torch.allclose(__snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__magic_name__ : Any =True
__magic_name__ : Any =test_model(__snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[Any] =model(__snake_case )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__snake_case , AlignDevicesHook(io_same_device=__snake_case ) )
__magic_name__ : int =torch.randn(2 , 3 ).to(0 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , torch.device(0 ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : int =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : int ={"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[int] =torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : Union[str, Any] =torch.randn(2 , 3 )
__magic_name__ : Optional[int] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__magic_name__ : Tuple ={
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Tuple =torch.randn(2 , 3 )
__magic_name__ : int =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Any =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : str =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__snake_case , execution_device=__snake_case , offload=__snake_case , offload_buffers=__snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : Optional[int] =torch.randn(2 , 3 )
__magic_name__ : Union[str, Any] =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Dict =ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__magic_name__ : List[str] =0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__magic_name__ : Optional[Any] =torch.device(__snake_case )
self.assertEqual(model.batchnorm.running_mean.device , __snake_case )
__magic_name__ : int =torch.randn(2 , 3 )
__magic_name__ : Any =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__snake_case , execution_device=__snake_case , offload=__snake_case , weights_map=model.state_dict() , offload_buffers=__snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__magic_name__ : List[Any] =torch.randn(2 , 3 )
__magic_name__ : str =model(__snake_case )
self.assertEqual(output.device , __snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 21 | 0 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( UpperCamelCase__ ):
_A : int = (KDPMaDiscreteScheduler,)
_A : Optional[int] = 10
def UpperCamelCase_ ( self , **snake_case__ ) -> Dict:
"""simple docstring"""
UpperCAmelCase = {
"""num_train_timesteps""": 11_00,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__snake_case )
return config
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__snake_case )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__snake_case , beta_end=__snake_case )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__snake_case )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__snake_case )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCAmelCase = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(__snake_case , __snake_case )
UpperCAmelCase = model(__snake_case , __snake_case )
UpperCAmelCase = scheduler.step(__snake_case , __snake_case , __snake_case )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(__snake_case ) )
UpperCAmelCase = torch.mean(torch.abs(__snake_case ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1_112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_002 ) < 1e-3
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
if torch_device == "mps":
return
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(__snake_case , __snake_case )
UpperCAmelCase = model(__snake_case , __snake_case )
UpperCAmelCase = scheduler.step(__snake_case , __snake_case , __snake_case )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(__snake_case ) )
UpperCAmelCase = torch.mean(torch.abs(__snake_case ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
if torch_device == "mps":
return
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=__snake_case )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter.to(__snake_case ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(__snake_case , __snake_case )
UpperCAmelCase = model(__snake_case , __snake_case )
UpperCAmelCase = scheduler.step(__snake_case , __snake_case , __snake_case )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(__snake_case ) )
UpperCAmelCase = torch.mean(torch.abs(__snake_case ) )
if str(__snake_case ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
| 673 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = KandinskyInpaintPipeline
UpperCamelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCamelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return 32
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A__ ( self :List[Any] ):
'''simple docstring'''
return 1_00
@property
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Dict =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self :str ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : str =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__magic_name__ : Tuple =MultilingualCLIP(__snake_case )
__magic_name__ : Optional[int] =text_encoder.eval()
return text_encoder
@property
def A__ ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Optional[Any] ={
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ : Union[str, Any] =UNetaDConditionModel(**__snake_case )
return model
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self :Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
__magic_name__ : Dict =VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[str] =self.dummy_text_encoder
__magic_name__ : Optional[Any] =self.dummy_tokenizer
__magic_name__ : Optional[Any] =self.dummy_unet
__magic_name__ : Tuple =self.dummy_movq
__magic_name__ : List[str] =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__snake_case , )
__magic_name__ : str ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self :str , __snake_case :Optional[Any] , __snake_case :int=0 ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : Dict =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
__magic_name__ : str =floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__ : int =image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ : str =Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
__magic_name__ : Dict =np.ones((64, 64) , dtype=np.floataa )
__magic_name__ : Any =0
if str(__snake_case ).startswith("""mps""" ):
__magic_name__ : Dict =torch.manual_seed(__snake_case )
else:
__magic_name__ : Tuple =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__ : List[Any] ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Tuple ="""cpu"""
__magic_name__ : List[Any] =self.get_dummy_components()
__magic_name__ : Union[str, Any] =self.pipeline_class(**__snake_case )
__magic_name__ : Tuple =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Tuple =pipe(**self.get_dummy_inputs(__snake_case ) )
__magic_name__ : List[Any] =output.images
__magic_name__ : Any =pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__magic_name__ : int =image[0, -3:, -3:, -1]
__magic_name__ : str =image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Optional[Any] =np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def A__ ( self :Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def A__ ( self :List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
__magic_name__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__magic_name__ : List[Any] =np.ones((7_68, 7_68) , dtype=np.floataa )
__magic_name__ : Any =0
__magic_name__ : int ="""a hat"""
__magic_name__ : int =KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__magic_name__ : Dict =KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
__magic_name__ : int =pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Union[str, Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ , __magic_name__ : Dict =pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__magic_name__ : Optional[Any] =pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
__magic_name__ : Optional[int] =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 21 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a__ ( UpperCamelCase__ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCamelCase ( lowercase ) -> List[str]:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
raise NotImplementedError()
| 514 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A :
def __init__( self :int , __snake_case :List[Any] , __snake_case :List[Any]=2 , __snake_case :Dict=True , __snake_case :Tuple=False , __snake_case :List[str]=10 , __snake_case :List[str]=3 , __snake_case :Union[str, Any]=32 * 8 , __snake_case :Optional[int]=32 * 8 , __snake_case :Any=4 , __snake_case :Union[str, Any]=64 , ):
'''simple docstring'''
__magic_name__ : Optional[int] =parent
__magic_name__ : List[Any] =batch_size
__magic_name__ : List[str] =is_training
__magic_name__ : List[str] =use_auxiliary_loss
__magic_name__ : Union[str, Any] =num_queries
__magic_name__ : str =num_channels
__magic_name__ : Union[str, Any] =min_size
__magic_name__ : Union[str, Any] =max_size
__magic_name__ : Optional[int] =num_labels
__magic_name__ : Tuple =hidden_dim
__magic_name__ : Any =hidden_dim
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
__magic_name__ : List[Any] =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
__magic_name__ : List[str] =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
__magic_name__ : Union[str, Any] =(torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
__magic_name__ : str =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Dict =MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__magic_name__ : str =self.num_queries
__magic_name__ : Dict =self.num_labels
__magic_name__ : int =[1, 1, 1, 1]
__magic_name__ : List[str] =self.num_channels
__magic_name__ : str =64
__magic_name__ : List[str] =1_28
__magic_name__ : Optional[Any] =self.hidden_dim
__magic_name__ : Tuple =self.hidden_dim
__magic_name__ : Optional[int] =self.hidden_dim
return config
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Tuple =self.prepare_config_and_inputs()
__magic_name__ : Optional[Any] ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self :Union[str, Any] , __snake_case :Tuple , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : int =output.encoder_hidden_states
__magic_name__ : List[str] =output.pixel_decoder_hidden_states
__magic_name__ : int =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , config.decoder_layers )
def A__ ( self :List[Any] , __snake_case :Optional[Any] , __snake_case :int , __snake_case :str , __snake_case :str=False ):
'''simple docstring'''
with torch.no_grad():
__magic_name__ : List[str] =MaskaFormerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Union[str, Any] =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : int =model(__snake_case , output_hidden_states=__snake_case )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__snake_case , __snake_case )
def A__ ( self :Optional[Any] , __snake_case :List[str] , __snake_case :List[Any] , __snake_case :int , __snake_case :Any , __snake_case :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : str =MaskaFormerForUniversalSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
def comm_check_on_output(__snake_case :List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__magic_name__ : int =model(pixel_values=__snake_case , pixel_mask=__snake_case )
__magic_name__ : List[str] =model(__snake_case )
comm_check_on_output(__snake_case )
__magic_name__ : Any =model(
pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
comm_check_on_output(__snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Any =MaskaFormerModelTester(self )
__magic_name__ : Union[str, Any] =ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def A__ ( self :Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__snake_case )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self :int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple =model_class(__snake_case )
__magic_name__ : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple =[*signature.parameters.keys()]
__magic_name__ : Optional[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__magic_name__ : int =MaskaFormerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ : Any =(self.model_tester.min_size,) * 2
__magic_name__ : Union[str, Any] ={
"""pixel_values""": torch.randn((2, 3, *size) , device=__snake_case ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__snake_case ),
"""class_labels""": torch.zeros(2 , 10 , device=__snake_case ).long(),
}
__magic_name__ : Optional[Any] =self.model_tester.get_config()
__magic_name__ : Dict =MaskaFormerForUniversalSegmentation(__snake_case ).to(__snake_case )
__magic_name__ : Any =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
def A__ ( self :List[str] ):
'''simple docstring'''
__magic_name__ , __magic_name__ : int =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] =model_class(__snake_case ).to(__snake_case )
__magic_name__ : Optional[int] =model(**__snake_case , output_attentions=__snake_case )
self.assertTrue(outputs.attentions is not None )
def A__ ( self :int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__magic_name__ : List[Any] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Dict =model_class(__snake_case )
model.to(__snake_case )
model.train()
__magic_name__ : Optional[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss
loss.backward()
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =self.all_model_classes[1]
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs()
__magic_name__ : Tuple =True
__magic_name__ : Optional[int] =True
__magic_name__ : int =model_class(__snake_case ).to(__snake_case )
model.train()
__magic_name__ : List[Any] =model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
__magic_name__ : Optional[int] =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__magic_name__ : Union[str, Any] =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__magic_name__ : Optional[int] =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ : Dict = 1e-4
def lowerCAmelCase_ ( ):
__magic_name__ : Dict =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __A ( unittest.TestCase ):
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self :int ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__snake_case )
__magic_name__ : int =self.default_image_processor
__magic_name__ : List[Any] =prepare_img()
__magic_name__ : Any =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Dict =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : List[str] =model(**__snake_case )
__magic_name__ : Any =torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Dict =torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
__magic_name__ : Any =torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : Tuple =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Optional[int] =self.default_image_processor
__magic_name__ : Tuple =prepare_img()
__magic_name__ : List[Any] =image_processor(__snake_case , return_tensors="""pt""" ).to(__snake_case )
__magic_name__ : Union[str, Any] =inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__magic_name__ : str =model(**__snake_case )
# masks_queries_logits
__magic_name__ : List[Any] =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__magic_name__ : List[Any] =[
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__magic_name__ : Dict =torch.tensor(__snake_case ).to(__snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
# class_queries_logits
__magic_name__ : Any =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__magic_name__ : int =torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
__magic_name__ : Any =self.default_image_processor
__magic_name__ : Union[str, Any] =image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , )
__magic_name__ : str =inputs["""pixel_values"""].to(__snake_case )
__magic_name__ : Tuple =[el.to(__snake_case ) for el in inputs["""mask_labels"""]]
__magic_name__ : Union[str, Any] =[el.to(__snake_case ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__magic_name__ : Dict =model(**__snake_case )
self.assertTrue(outputs.loss is not None )
| 21 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( UpperCamelCase__ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = MgpstrTokenizer
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = {}
_SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : int = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : Union[str, Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
snake_case_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__snake_case ) + "\n" )
def UpperCAmelCase__ ( self : str , **A__ : Tuple ) -> List[str]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def UpperCAmelCase__ ( self : List[str] , A__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = """tester"""
snake_case_ : Any = """tester"""
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Dict ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
snake_case_ : Union[str, Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case_ : Optional[Any] = tokenizer.encode([special_token] , add_special_tokens=__snake_case )
self.assertEqual(len(__snake_case ) , 1 )
snake_case_ : int = tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
snake_case_ : List[str] = self.get_input_output_texts(__snake_case )
snake_case_ : Dict = tokenizer.tokenize(__snake_case )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(__snake_case )
snake_case_ : Dict = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case_ : Tuple = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertNotEqual(len(__snake_case ) , 0 )
snake_case_ : Optional[Any] = tokenizer.decode(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(text_a.replace(" " , "" ) , __snake_case )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
| 666 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """segformer"""
def __init__( self :List[str] , __snake_case :str=3 , __snake_case :Optional[Any]=4 , __snake_case :List[Any]=[2, 2, 2, 2] , __snake_case :Dict=[8, 4, 2, 1] , __snake_case :Optional[int]=[32, 64, 1_60, 2_56] , __snake_case :Union[str, Any]=[7, 3, 3, 3] , __snake_case :Optional[Any]=[4, 2, 2, 2] , __snake_case :Tuple=[1, 2, 5, 8] , __snake_case :List[Any]=[4, 4, 4, 4] , __snake_case :Optional[Any]="gelu" , __snake_case :Tuple=0.0 , __snake_case :Dict=0.0 , __snake_case :Optional[int]=0.1 , __snake_case :Optional[int]=0.02 , __snake_case :Tuple=0.1 , __snake_case :Union[str, Any]=1E-6 , __snake_case :int=2_56 , __snake_case :Optional[int]=2_55 , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(**__snake_case )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __snake_case , )
__magic_name__ : Dict =num_channels
__magic_name__ : str =num_encoder_blocks
__magic_name__ : List[Any] =depths
__magic_name__ : Optional[Any] =sr_ratios
__magic_name__ : List[str] =hidden_sizes
__magic_name__ : List[str] =patch_sizes
__magic_name__ : Any =strides
__magic_name__ : Optional[Any] =mlp_ratios
__magic_name__ : str =num_attention_heads
__magic_name__ : int =hidden_act
__magic_name__ : List[Any] =hidden_dropout_prob
__magic_name__ : Optional[Any] =attention_probs_dropout_prob
__magic_name__ : Optional[Any] =classifier_dropout_prob
__magic_name__ : List[str] =initializer_range
__magic_name__ : List[str] =drop_path_rate
__magic_name__ : List[Any] =layer_norm_eps
__magic_name__ : List[str] =decoder_hidden_size
__magic_name__ : Union[str, Any] =kwargs.get("""reshape_last_stage""" , __snake_case )
__magic_name__ : Dict =semantic_loss_ignore_index
class __A ( UpperCamelCase__ ):
UpperCamelCase = version.parse("""1.11""" )
@property
def A__ ( self :List[str] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self :Any ):
'''simple docstring'''
return 1E-4
@property
def A__ ( self :int ):
'''simple docstring'''
return 12
| 21 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( UpperCamelCase__ ):
lowercase__ : Union[str, Any] = (PNDMScheduler,)
lowercase__ : str = (("""num_inference_steps""", 50),)
def __snake_case( self : str , **_UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**__snake_case )
return config
def __snake_case( self : List[Any] , _UpperCamelCase : Union[str, Any]=0 , **_UpperCamelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , __snake_case )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**__snake_case )
SCREAMING_SNAKE_CASE = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(__snake_case )
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE = scheduler.step_prk(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step_prk(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE = scheduler.step_plms(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step_plms(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def __snake_case( self : Dict , _UpperCamelCase : Optional[int]=0 , **_UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , __snake_case )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(__snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE = scheduler.step_prk(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step_prk(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE = scheduler.step_plms(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step_plms(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case( self : Any , **_UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**__snake_case )
SCREAMING_SNAKE_CASE = scheduler_class(**__snake_case )
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(__snake_case )
for i, t in enumerate(scheduler.prk_timesteps ):
SCREAMING_SNAKE_CASE = model(__snake_case , __snake_case )
SCREAMING_SNAKE_CASE = scheduler.step_prk(__snake_case , __snake_case , __snake_case ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
SCREAMING_SNAKE_CASE = model(__snake_case , __snake_case )
SCREAMING_SNAKE_CASE = scheduler.step_plms(__snake_case , __snake_case , __snake_case ).prev_sample
return sample
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , __snake_case )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**__snake_case )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
if num_inference_steps is not None and hasattr(__snake_case , "set_timesteps" ):
scheduler.set_timesteps(__snake_case )
elif num_inference_steps is not None and not hasattr(__snake_case , "set_timesteps" ):
SCREAMING_SNAKE_CASE = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE = scheduler.step_prk(__snake_case , 0 , __snake_case , **__snake_case ).prev_sample
SCREAMING_SNAKE_CASE = scheduler.step_prk(__snake_case , 1 , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
SCREAMING_SNAKE_CASE = scheduler.step_plms(__snake_case , 0 , __snake_case , **__snake_case ).prev_sample
SCREAMING_SNAKE_CASE = scheduler.step_plms(__snake_case , 1 , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __snake_case( self : Tuple ) -> int:
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=__snake_case )
def __snake_case( self : int ) -> int:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__snake_case )
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE = scheduler_class(**__snake_case )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def __snake_case( self : Optional[int] ) -> Tuple:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__snake_case , beta_end=__snake_case )
def __snake_case( self : Union[str, Any] ) -> str:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__snake_case )
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__snake_case )
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=__snake_case )
def __snake_case( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__snake_case )
def __snake_case( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 27
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
SCREAMING_SNAKE_CASE = scheduler.step_prk(__snake_case , __snake_case , __snake_case ).prev_sample
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(__snake_case ):
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**__snake_case )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __snake_case( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.full_loop()
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__snake_case ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.full_loop(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__snake_case ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __snake_case( self : List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.full_loop(set_alpha_to_one=__snake_case , beta_start=0.0_1 )
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__snake_case ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __snake_case( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.full_loop(set_alpha_to_one=__snake_case , beta_start=0.0_1 )
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__snake_case ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 403 |
import heapq
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : list[list] =[]
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase , [-1 * len(lowerCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
__magic_name__ : Tuple =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__magic_name__ : Tuple =heapq.heappop(lowerCamelCase )[1][0]
chosen_vertices.add(lowerCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
__magic_name__ : Tuple =elem[1][1].index(lowerCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 21 | 0 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "spiece.model"}
A_ = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
A_ = {
"AI-Sweden/gpt-sw3-126m": 2048,
"AI-Sweden/gpt-sw3-350m": 2048,
"AI-Sweden/gpt-sw3-1.6b": 2048,
"AI-Sweden/gpt-sw3-6.7b": 2048,
"AI-Sweden/gpt-sw3-20b": 2048,
}
class __lowerCamelCase ( UpperCamelCase__ ):
a__: Union[str, Any] = VOCAB_FILES_NAMES
a__: Tuple = PRETRAINED_VOCAB_FILES_MAP
a__: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__: Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = None , **UpperCAmelCase , ):
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase_ = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' you are testing the model, this can safely be ignored''' )
lowerCamelCase_ = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCamelCase_ = """<|endoftext|>""" if eos_token is None else eos_token
lowerCamelCase_ = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCamelCase_ = unk_token if pad_token is None else pad_token
lowerCamelCase_ = eos_token if bos_token is None else bos_token
else:
lowerCamelCase_ = """<pad>""" if pad_token is None else pad_token
lowerCamelCase_ = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = remove_space
lowerCamelCase_ = keep_accents
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
# Used for whitespace normalization in input texts
# fmt : off
lowerCamelCase_ = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCamelCase_ = re.compile(
f"[{''.join(map(__snake_case , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" )
def __getstate__( self ):
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , UpperCAmelCase ):
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase__ ( self ):
return len(self.sp_model )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = self.non_printing_characters_re.sub('''''' , __snake_case )
# Normalize whitespaces
lowerCamelCase_ = """""".join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
lowerCamelCase_ = unicodedata.normalize('''NFC''' , __snake_case )
return text
def UpperCAmelCase__ ( self , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = self.preprocess_text(__snake_case )
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
return self.sp_model.PieceToId(__snake_case )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
return self.sp_model.IdToPiece(__snake_case )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
return out_string
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = []
lowerCamelCase_ = """"""
lowerCamelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__snake_case ) + token
lowerCamelCase_ = True
lowerCamelCase_ = []
else:
current_sub_tokens.append(__snake_case )
lowerCamelCase_ = False
out_string += self.sp_model.decode(__snake_case )
return out_string
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
if not os.path.isdir(__snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase_ = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , '''wb''' ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = False ):
if isinstance(__snake_case , __snake_case ):
lowerCamelCase_ = self.preprocess_text(__snake_case )
lowerCamelCase_ = self.sp_model.encode(__snake_case )
else:
lowerCamelCase_ = [self.preprocess_text(__snake_case ) for t in text]
lowerCamelCase_ = self.sp_model.encode(__snake_case )
if return_tensors is True or return_tensors == "pt":
lowerCamelCase_ = torch.tensor(__snake_case )
return token_ids
def UpperCAmelCase__ ( self , UpperCAmelCase ):
return self.sp_model.decode(__snake_case )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
lowerCamelCase_ = (
f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(__snake_case ) + f"{self.bos_token}Bot:"
)
return self.encode(text=__snake_case )
| 29 |
UpperCAmelCase_ : int = range(2, 20 + 1)
UpperCAmelCase_ : Tuple = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
__magic_name__ : Any =sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
__magic_name__ , __magic_name__ : Tuple =0, 0
__magic_name__ : Optional[Any] =n - i
__magic_name__ : Union[str, Any] =memo.get(lowerCamelCase )
if sub_memo is not None:
__magic_name__ : int =sub_memo.get(lowerCamelCase )
if jumps is not None and len(lowerCamelCase ) > 0:
# find and make the largest jump without going over
__magic_name__ : Dict =-1
for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__magic_name__ : Optional[Any] =_k
break
if max_jump >= 0:
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =jumps[max_jump]
# since the difference between jumps is cached, add c
__magic_name__ : Tuple =diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
__magic_name__ , __magic_name__ : Tuple =divmod(lowerCamelCase , 10 )
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__magic_name__ : str =[]
else:
__magic_name__ : List[str] ={c: []}
__magic_name__ : List[str] =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__magic_name__ , __magic_name__ : Union[str, Any] =next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__magic_name__ , __magic_name__ : Optional[int] =compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
__magic_name__ : Tuple =sub_memo[c]
# keep jumps sorted by # of terms skipped
__magic_name__ : List[Any] =0
while j < len(lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if i >= n:
return 0, i
if k > len(lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__magic_name__ : Tuple =i
__magic_name__ , __magic_name__ , __magic_name__ : Tuple =0, 0, 0
for j in range(len(lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__magic_name__ : Optional[Any] =ds_c + ds_b
diff += addend
__magic_name__ : str =0
for j in range(lowerCamelCase ):
__magic_name__ : int =a_i[j] + addend
__magic_name__ , __magic_name__ : Any =divmod(lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return diff, i - start_i
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for j in range(lowerCamelCase , len(lowerCamelCase ) ):
__magic_name__ : Tuple =digits[j] + addend
if s >= 10:
__magic_name__ , __magic_name__ : int =divmod(lowerCamelCase , 10 )
__magic_name__ : int =addend // 10 + quotient
else:
__magic_name__ : Dict =s
__magic_name__ : Any =addend // 10
if addend == 0:
break
while addend > 0:
__magic_name__ , __magic_name__ : Union[str, Any] =divmod(lowerCamelCase , 10 )
digits.append(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase = 10**15 ):
__magic_name__ : List[str] =[1]
__magic_name__ : str =1
__magic_name__ : str =0
while True:
__magic_name__ , __magic_name__ : List[str] =next_term(lowerCamelCase , 20 , i + dn , lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
__magic_name__ : int =0
for j in range(len(lowerCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : List[Any]=7 , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : List[str]=18 , UpperCAmelCase : Optional[int]=30 , UpperCAmelCase : str=400 , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=True , ) -> List[str]:
lowerCAmelCase :Tuple = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase :List[Any] = parent
lowerCAmelCase :Any = batch_size
lowerCAmelCase :str = num_channels
lowerCAmelCase :List[str] = image_size
lowerCAmelCase :str = min_resolution
lowerCAmelCase :Union[str, Any] = max_resolution
lowerCAmelCase :Tuple = do_resize
lowerCAmelCase :Optional[Any] = size
lowerCAmelCase :Dict = apply_ocr
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCamelCase ( UpperCamelCase__ , unittest.TestCase ):
lowercase_ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
lowerCAmelCase :Dict = LayoutLMvaImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Dict ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Any ) -> Tuple:
lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , 'do_resize' ) )
self.assertTrue(hasattr(__snake_case , 'size' ) )
self.assertTrue(hasattr(__snake_case , 'apply_ocr' ) )
def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
lowerCAmelCase :Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
lowerCAmelCase :Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def UpperCAmelCase__ ( self : str ) -> Tuple:
pass
def UpperCAmelCase__ ( self : Dict ) -> str:
lowerCAmelCase :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
lowerCAmelCase :Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
lowerCAmelCase :Optional[int] = image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
lowerCAmelCase :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
lowerCAmelCase :str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCAmelCase :Dict = image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCAmelCase__ ( self : Dict ) -> int:
lowerCAmelCase :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
lowerCAmelCase :Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCAmelCase :Union[str, Any] = image_processing(__snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCAmelCase__ ( self : int ) -> int:
lowerCAmelCase :int = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCAmelCase :Union[str, Any] = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
lowerCAmelCase :Dict = Image.open(ds[0]['file'] ).convert('RGB' )
lowerCAmelCase :str = image_processing(__snake_case , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase :Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
lowerCAmelCase :Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
lowerCAmelCase :Dict = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
lowerCAmelCase :Union[str, Any] = image_processing(__snake_case , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) | 553 |
from typing import List
from .keymap import KEYMAP, get_character
def lowerCAmelCase_ ( lowerCamelCase ):
def decorator(lowerCamelCase ):
__magic_name__ : str =getattr(lowerCamelCase , """handle_key""" , [] )
handle += [key]
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
def lowerCAmelCase_ ( *lowerCamelCase ):
def decorator(lowerCamelCase ):
__magic_name__ : Dict =getattr(lowerCamelCase , """handle_key""" , [] )
handle += keys
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
class __A ( UpperCamelCase__ ):
def __new__( cls :Dict , __snake_case :Optional[Any] , __snake_case :Union[str, Any] , __snake_case :List[str] ):
'''simple docstring'''
__magic_name__ : int =super().__new__(cls , __snake_case , __snake_case , __snake_case )
if not hasattr(__snake_case , """key_handler""" ):
setattr(__snake_case , """key_handler""" , {} )
setattr(__snake_case , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__magic_name__ : int =getattr(__snake_case , """handle_key""" , [] )
for key in handled_keys:
__magic_name__ : List[str] =value
return new_cls
@staticmethod
def A__ ( cls :Optional[int] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =get_character()
if char != KEYMAP["undefined"]:
__magic_name__ : Optional[int] =ord(__snake_case )
__magic_name__ : int =cls.key_handler.get(__snake_case )
if handler:
__magic_name__ : Dict =char
return handler(cls )
else:
return None
def lowerCAmelCase_ ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 21 | 0 |
"""simple docstring"""
from functools import reduce
a_ = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] = N ):
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str(int(SCREAMING_SNAKE_CASE__ ) * int(SCREAMING_SNAKE_CASE__ ) ) , n[i : i + 1_3] ) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) - 1_2 ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 480 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCAmelCase_ : Dict = 2048
UpperCAmelCase_ : int = 4096
UpperCAmelCase_ : Any = 42
UpperCAmelCase_ : Optional[int] = os.environ.pop("PROCESS_TRAIN", "false")
UpperCAmelCase_ : str = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def lowerCAmelCase_ ( lowerCamelCase ):
def choose_first(lowerCamelCase , lowerCamelCase=False ):
assert isinstance(lowerCamelCase , lowerCamelCase )
if len(lowerCamelCase ) == 1:
__magic_name__ : List[str] =answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__magic_name__ : Tuple ={k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
__magic_name__ : str ={"""id""": example["""id"""]}
__magic_name__ : List[Any] =example["""annotations"""]
__magic_name__ : List[str] =annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
__magic_name__ : Optional[int] =["""yes"""] if 1 in yes_no_answer else ["""no"""]
__magic_name__ : List[str] =[]
__magic_name__ : Dict =[]
__magic_name__ : str =["""<cls>"""]
else:
__magic_name__ : Tuple =["""short"""]
__magic_name__ : Optional[int] =choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
__magic_name__ : Tuple =["""long"""]
__magic_name__ : Tuple =choose_first(annotation["""long_answer"""] , is_long_answer=lowerCamelCase )
__magic_name__ : List[Any] =[]
answer.update(lowerCamelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
__magic_name__ : Any =True
else:
__magic_name__ : List[str] =False
__magic_name__ : int =["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , lowerCamelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =_get_single_answer(lowerCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : Any =example["""document"""]["""tokens"""]
__magic_name__ : str =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__magic_name__ : Dict =["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__magic_name__ : Tuple =example["""document"""]["""tokens"""]
__magic_name__ : Optional[int] =answer["""start_token"""]
__magic_name__ : List[Any] =answer["""end_token"""]
__magic_name__ : Optional[Any] =[]
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__magic_name__ : Optional[int] =""" """.join(context[start_token:end_token] )
# checking above code
if assertion:
__magic_name__ : List[str] =doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : str =doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
__magic_name__ : Dict =""" """.join([old[i] for i in range(len(lowerCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , lowerCamelCase , end="""\n""" )
print("""Old:""" , lowerCamelCase , end="""\n\n""" )
return {
"context": " ".join(lowerCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=True ):
# overlap will be of doc_stride - q_len
__magic_name__ : Any =get_context_and_ans(lowerCamelCase , assertion=lowerCamelCase )
__magic_name__ : Union[str, Any] =out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__magic_name__ : List[Any] =tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
__magic_name__ : Dict =input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__magic_name__ : List[str] =[]
__magic_name__ : int =[]
__magic_name__ : List[str] =input_ids[:q_len]
__magic_name__ : Dict =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Tuple =input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowerCamelCase ),
"end_token": [-100] * len(lowerCamelCase ),
"category": category,
},
}
__magic_name__ : int =out["""context"""].split()
__magic_name__ : Any =splitted_context[answer["""end_token"""]]
__magic_name__ : str =len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=lowerCamelCase , ).input_ids )
__magic_name__ : Optional[int] =len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=lowerCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__magic_name__ : Union[str, Any] =len(tokenizer(lowerCamelCase , add_special_tokens=lowerCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__magic_name__ : str =input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
__magic_name__ : Dict =answer["""start_token"""]
__magic_name__ : int =answer["""end_token"""]
if assertion:
__magic_name__ : Any =tokenizer.decode(lowerCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , lowerCamelCase , end="""\n\n""" )
if len(lowerCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__magic_name__ : Any =input_ids[:q_len]
__magic_name__ : Union[str, Any] =range(lowerCamelCase , len(lowerCamelCase ) , max_length - doc_stride )
__magic_name__ : Any =[]
__magic_name__ : List[str] =[]
__magic_name__ : List[str] =[]
__magic_name__ : str =[] # null, yes, no, long, short
for i in doc_start_indices:
__magic_name__ : List[Any] =i + max_length - q_len
__magic_name__ : Dict =input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__magic_name__ : List[Any] =start_token - i + q_len
__magic_name__ : Optional[Any] =end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
__magic_name__ : Optional[Any] =-100
__magic_name__ : Optional[Any] =-100
answers_category.append("""null""" )
__magic_name__ : Optional[int] =inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase )
answers_end_token.append(lowerCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(lowerCamelCase ) )
print("""Old:""" , tokenizer.decode(lowerCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=False ):
__magic_name__ : List[Any] =get_strided_contexts_and_ans(
lowerCamelCase , lowerCamelCase , doc_stride=lowerCamelCase , max_length=lowerCamelCase , assertion=lowerCamelCase , )
return example
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
with jsonlines.open(lowerCamelCase , """a""" ) as writer:
for example in tqdm(lowerCamelCase , total=len(lowerCamelCase ) , desc="""Saving samples ... """ ):
__magic_name__ : int =example["""labels"""]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCAmelCase_ : Optional[int] = load_dataset("natural_questions")
UpperCAmelCase_ : Optional[int] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
UpperCAmelCase_ : str = data["train" if PROCESS_TRAIN == "true" else "validation"]
UpperCAmelCase_ : Optional[int] = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
UpperCAmelCase_ : int = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCAmelCase_ : Optional[Any] = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
UpperCAmelCase_ : int = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 21 | 0 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__a: Any = logging.get_logger(__name__)
__a: Dict = TypeVar('''DatasetType''', Dataset, IterableDataset)
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = "first_exhausted" , ) -> Union[str, Any]:
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(__snake_case ):
if not isinstance(__snake_case , (Dataset, IterableDataset) ):
if isinstance(__snake_case , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(__snake_case )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__snake_case ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__snake_case ).__name__}.""" )
if i == 0:
_UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(__snake_case , __snake_case ) else (IterableDataset, Dataset)
)
elif not isinstance(__snake_case , __snake_case ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__snake_case , __snake_case , __snake_case , info=__snake_case , split=__snake_case , stopping_strategy=__snake_case )
else:
return _interleave_iterable_datasets(
__snake_case , __snake_case , __snake_case , info=__snake_case , split=__snake_case , stopping_strategy=__snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case = None , __snake_case = None , __snake_case = 0 , ) -> Optional[Any]:
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(__snake_case ):
if not isinstance(__snake_case , (Dataset, IterableDataset) ):
if isinstance(__snake_case , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(__snake_case )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__snake_case ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__snake_case ).__name__}.""" )
if i == 0:
_UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(__snake_case , __snake_case ) else (IterableDataset, Dataset)
)
elif not isinstance(__snake_case , __snake_case ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__snake_case , info=__snake_case , split=__snake_case , axis=__snake_case )
else:
return _concatenate_iterable_datasets(__snake_case , info=__snake_case , split=__snake_case , axis=__snake_case ) | 108 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __A ( UpperCamelCase__ ):
UpperCamelCase = """xlm-roberta-xl"""
def __init__( self :Dict , __snake_case :Optional[Any]=25_08_80 , __snake_case :List[Any]=25_60 , __snake_case :Optional[Any]=36 , __snake_case :Any=32 , __snake_case :int=1_02_40 , __snake_case :List[Any]="gelu" , __snake_case :Union[str, Any]=0.1 , __snake_case :Optional[Any]=0.1 , __snake_case :str=5_14 , __snake_case :Union[str, Any]=1 , __snake_case :Optional[int]=0.02 , __snake_case :str=1E-05 , __snake_case :str=1 , __snake_case :int=0 , __snake_case :Tuple=2 , __snake_case :Optional[int]="absolute" , __snake_case :str=True , __snake_case :Any=None , **__snake_case :Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__magic_name__ : List[str] =vocab_size
__magic_name__ : List[str] =hidden_size
__magic_name__ : Union[str, Any] =num_hidden_layers
__magic_name__ : Any =num_attention_heads
__magic_name__ : Any =hidden_act
__magic_name__ : List[str] =intermediate_size
__magic_name__ : Any =hidden_dropout_prob
__magic_name__ : Union[str, Any] =attention_probs_dropout_prob
__magic_name__ : Any =max_position_embeddings
__magic_name__ : Any =type_vocab_size
__magic_name__ : List[str] =initializer_range
__magic_name__ : Optional[int] =layer_norm_eps
__magic_name__ : Dict =position_embedding_type
__magic_name__ : Any =use_cache
__magic_name__ : Dict =classifier_dropout
class __A ( UpperCamelCase__ ):
@property
def A__ ( self :Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
__magic_name__ : str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__ : Optional[Any] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 21 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
# TODO Update this
__UpperCAmelCase = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class SCREAMING_SNAKE_CASE ( UpperCamelCase__ ):
"""simple docstring"""
lowerCamelCase : Optional[Any] ="esm"
def __init__( self : List[Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : List[str]=7_68 , lowerCAmelCase : str=12 , lowerCAmelCase : int=12 , lowerCAmelCase : str=30_72 , lowerCAmelCase : int=0.1 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Optional[Any]=10_26 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : List[str]=1e-12 , lowerCAmelCase : Any="absolute" , lowerCAmelCase : Tuple=True , lowerCAmelCase : Any=None , lowerCAmelCase : Dict=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : Tuple=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Any , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=__snake_case , mask_token_id=__snake_case , **__snake_case )
__lowerCAmelCase : Dict = vocab_size
__lowerCAmelCase : Optional[Any] = hidden_size
__lowerCAmelCase : Any = num_hidden_layers
__lowerCAmelCase : int = num_attention_heads
__lowerCAmelCase : Any = intermediate_size
__lowerCAmelCase : List[Any] = hidden_dropout_prob
__lowerCAmelCase : str = attention_probs_dropout_prob
__lowerCAmelCase : Union[str, Any] = max_position_embeddings
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : Optional[Any] = layer_norm_eps
__lowerCAmelCase : Optional[int] = position_embedding_type
__lowerCAmelCase : int = use_cache
__lowerCAmelCase : Any = emb_layer_norm_before
__lowerCAmelCase : List[str] = token_dropout
__lowerCAmelCase : List[str] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowerCAmelCase : Tuple = EsmFoldConfig()
elif isinstance(__snake_case , __snake_case ):
__lowerCAmelCase : Any = EsmFoldConfig(**__snake_case )
__lowerCAmelCase : Dict = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowerCAmelCase : List[Any] = get_default_vocab_list()
else:
__lowerCAmelCase : Any = vocab_list
else:
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Optional[Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , __snake_case ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , __snake_case ):
__lowerCAmelCase : str = self.esmfold_config.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : int =None
lowerCamelCase : List[str] =True
lowerCamelCase : Union[str, Any] =False
lowerCamelCase : Optional[int] =False
lowerCamelCase : str =False
lowerCamelCase : Union[str, Any] =0
lowerCamelCase : List[str] =True
lowerCamelCase : str =False
lowerCamelCase : List[Any] =128
lowerCamelCase : str =None
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowerCAmelCase : Dict = TrunkConfig()
elif isinstance(self.trunk , __snake_case ):
__lowerCAmelCase : Optional[int] = TrunkConfig(**self.trunk )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowerCAmelCase : List[Any] = asdict(self )
__lowerCAmelCase : str = self.trunk.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : Union[str, Any] =48
lowerCamelCase : Any =1024
lowerCamelCase : Tuple =128
lowerCamelCase : Dict =32
lowerCamelCase : Any =32
lowerCamelCase : List[str] =32
lowerCamelCase : Optional[int] =0
lowerCamelCase : Tuple =0
lowerCamelCase : Union[str, Any] =False
lowerCamelCase : Union[str, Any] =4
lowerCamelCase : str =128
lowerCamelCase : str =None
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
"""simple docstring"""
if self.structure_module is None:
__lowerCAmelCase : Dict = StructureModuleConfig()
elif isinstance(self.structure_module , __snake_case ):
__lowerCAmelCase : int = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
__lowerCAmelCase : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width
__lowerCAmelCase : str = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Dict = asdict(self )
__lowerCAmelCase : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : Union[str, Any] =384
lowerCamelCase : Union[str, Any] =128
lowerCamelCase : List[Any] =16
lowerCamelCase : Optional[int] =128
lowerCamelCase : List[Any] =12
lowerCamelCase : int =4
lowerCamelCase : List[Any] =8
lowerCamelCase : Optional[int] =0.1
lowerCamelCase : Any =8
lowerCamelCase : str =1
lowerCamelCase : List[Any] =2
lowerCamelCase : str =7
lowerCamelCase : Dict =10
lowerCamelCase : Tuple =1e-8
lowerCamelCase : Union[str, Any] =1e5
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
"""simple docstring"""
return asdict(self )
def snake_case_ () -> Optional[int]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 651 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase_ ( lowerCamelCase="ro" , lowerCamelCase="en" , lowerCamelCase="wmt16" , lowerCamelCase=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__magic_name__ : Dict =F"{src_lang}-{tgt_lang}"
print(F"Converting {dataset}-{pair}" )
__magic_name__ : Dict =datasets.load_dataset(lowerCamelCase , lowerCamelCase )
if save_dir is None:
__magic_name__ : Optional[int] =F"{dataset}-{pair}"
__magic_name__ : int =Path(lowerCamelCase )
save_dir.mkdir(exist_ok=lowerCamelCase )
for split in ds.keys():
print(F"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
__magic_name__ : Dict ="""val""" if split == """validation""" else split
__magic_name__ : List[Any] =save_dir.joinpath(F"{fn}.source" )
__magic_name__ : Optional[int] =save_dir.joinpath(F"{fn}.target" )
__magic_name__ : Optional[Any] =src_path.open("""w+""" )
__magic_name__ : List[Any] =tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ : str =x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 21 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : Dict )-> Tuple:
if not numbers:
return 0
if not isinstance(snake_case , (list, tuple) ) or not all(
isinstance(snake_case , snake_case ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
_lowerCamelCase = numbers[0]
for i in range(1 , len(snake_case ) ):
# update the maximum and minimum subarray products
_lowerCamelCase = numbers[i]
if number < 0:
_lowerCamelCase = min_till_now, max_till_now
_lowerCamelCase = max(snake_case , max_till_now * number )
_lowerCamelCase = min(snake_case , min_till_now * number )
# update the maximum product found till now
_lowerCamelCase = max(snake_case , snake_case )
return max_prod
| 650 |
from __future__ import annotations
from fractions import Fraction
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =[]
__magic_name__ : List[Any] =11
__magic_name__ : Tuple =int("""1""" + """0""" * digit_len )
for num in range(lowerCamelCase , lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowerCamelCase , lowerCamelCase ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
__magic_name__ : List[str] =10
return solutions
def lowerCAmelCase_ ( lowerCamelCase = 2 ):
__magic_name__ : str =1.0
for fraction in fraction_list(lowerCamelCase ):
__magic_name__ : int =Fraction(lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 21 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
class _lowercase ( UpperCamelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ['''pixel_values''']
def __init__( self :Union[str, Any] , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :float = None , lowerCAmelCase__ :PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Union[int, float] = 1 / 255 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ :str , ) -> List[Any]:
super().__init__(**__snake_case )
__SCREAMING_SNAKE_CASE : int = size if size is not None else {"""shortest_edge""": 384}
__SCREAMING_SNAKE_CASE : str = get_size_dict(__snake_case , default_to_square=__snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = do_resize
__SCREAMING_SNAKE_CASE : str = size
# Default value set here for backwards compatibility where the value in config is None
__SCREAMING_SNAKE_CASE : str = crop_pct if crop_pct is not None else 224 / 256
__SCREAMING_SNAKE_CASE : List[str] = resample
__SCREAMING_SNAKE_CASE : Any = do_rescale
__SCREAMING_SNAKE_CASE : Optional[int] = rescale_factor
__SCREAMING_SNAKE_CASE : Tuple = do_normalize
__SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :float , lowerCAmelCase__ :PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :int , ) -> Any:
__SCREAMING_SNAKE_CASE : Tuple = get_size_dict(__snake_case , default_to_square=__snake_case )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
__SCREAMING_SNAKE_CASE : Dict = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__SCREAMING_SNAKE_CASE : Optional[int] = int(shortest_edge / crop_pct )
__SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(__snake_case , size=__snake_case , default_to_square=__snake_case )
__SCREAMING_SNAKE_CASE : int = resize(image=__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__snake_case , size=(shortest_edge, shortest_edge) , data_format=__snake_case , **__snake_case )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__snake_case , size=(shortest_edge, shortest_edge) , resample=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__( self :int , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Union[int, float] , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :Any , ) -> Union[str, Any]:
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__( self :List[str] , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Union[float, List[float]] , lowerCAmelCase__ :Union[float, List[float]] , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :List[Any] , ) -> str:
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__( self :List[str] , lowerCAmelCase__ :ImageInput , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :float = None , lowerCAmelCase__ :PILImageResampling = None , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :float = None , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , lowerCAmelCase__ :ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase__ :Union[str, Any] , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : str = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
__SCREAMING_SNAKE_CASE : Optional[int] = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE : Tuple = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE : Optional[Any] = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else self.size
__SCREAMING_SNAKE_CASE : int = get_size_dict(__snake_case , default_to_square=__snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE : Union[str, Any] = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE : List[str] = [self.resize(image=__snake_case , size=__snake_case , crop_pct=__snake_case , resample=__snake_case ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE : List[str] = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE : Dict = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
__SCREAMING_SNAKE_CASE : int = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
__SCREAMING_SNAKE_CASE : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
| 696 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
__magic_name__ : Dict =sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__magic_name__ : Union[str, Any] =mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
__magic_name__ : List[Any] =proportion * 4
print(F"The estimated value of pi is {pi_estimate}" )
print(F"The numpy value of pi is {pi}" )
print(F"The total error is {abs(pi - pi_estimate )}" )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ):
def identity_function(lowerCamelCase ) -> float:
return x
__magic_name__ : Optional[int] =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__magic_name__ : str =(max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F"Estimating area under y=x where x varies from {min_value} to {max_value}" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {expected_value}" )
print(F"Total error is {abs(estimated_value - expected_value )}" )
print("""******************""" )
def lowerCAmelCase_ ( lowerCamelCase ):
def function_to_integrate(lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
__magic_name__ : Dict =area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F"Estimated value is {estimated_value}" )
print(F"Expected value is {pi}" )
print(F"Total error is {abs(estimated_value - pi )}" )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
"""simple docstring"""
import math
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(lowerCAmelCase )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowerCAmelCase_ : Any = "Enter the base and the power separated by a comma: "
lowerCAmelCase_ : Any = map(int, input(prompt).split(''','''))
lowerCAmelCase_ : Optional[Any] = map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowerCAmelCase_ : Union[str, Any] = res(xa, ya)
lowerCAmelCase_ : List[str] = res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
| 673 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __A ( tf.keras.layers.Layer ):
def __init__( self :Optional[int] , __snake_case :Dict[str, int] , __snake_case :List[str] , __snake_case :int = None , __snake_case :int = None ):
'''simple docstring'''
super().__init__()
__magic_name__ : Optional[int] =pad_token_id
__magic_name__ : List[Any] =max_length
__magic_name__ : Dict =vocab
__magic_name__ : int =merges
__magic_name__ : Optional[int] =BytePairTokenizer(__snake_case , __snake_case , sequence_length=__snake_case )
@classmethod
def A__ ( cls :List[Any] , __snake_case :GPTaTokenizer , *__snake_case :int , **__snake_case :Any ):
'''simple docstring'''
__magic_name__ : List[Any] =[""" """.join(__snake_case ) for m in tokenizer.bpe_ranks.keys()]
__magic_name__ : str =tokenizer.get_vocab()
return cls(__snake_case , __snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Dict , __snake_case :Union[str, os.PathLike] , *__snake_case :Union[str, Any] , **__snake_case :int ):
'''simple docstring'''
__magic_name__ : Dict =GPTaTokenizer.from_pretrained(__snake_case , *__snake_case , **__snake_case )
return cls.from_tokenizer(__snake_case , *__snake_case , **__snake_case )
@classmethod
def A__ ( cls :Optional[Any] , __snake_case :List[Any] ):
'''simple docstring'''
return cls(**__snake_case )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def A__ ( self :List[Any] , __snake_case :Dict , __snake_case :int = None ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.tf_tokenizer(__snake_case )
__magic_name__ : Tuple =tf.ones_like(__snake_case )
if self.pad_token_id is not None:
# pad the tokens up to max length
__magic_name__ : Tuple =max_length if max_length is not None else self.max_length
if max_length is not None:
__magic_name__ , __magic_name__ : Tuple =pad_model_inputs(
__snake_case , max_seq_length=__snake_case , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 21 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> List[str]:
'''simple docstring'''
def is_in_circle(SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: Optional[int] ) -> bool:
A__ = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
A__ = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(SCREAMING_SNAKE_CASE_ ) )
# The ratio of the area for circle to square is pi/4.
A__ = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: int = 0.0 , SCREAMING_SNAKE_CASE_: Tuple = 1.0 , ) -> List[Any]:
'''simple docstring'''
return mean(
function_to_integrate(uniform(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for _ in range(SCREAMING_SNAKE_CASE_ ) ) * (max_value - min_value)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Optional[int] = 0.0 , SCREAMING_SNAKE_CASE_: str = 1.0 ) -> int:
'''simple docstring'''
def identity_function(SCREAMING_SNAKE_CASE_: int ) -> float:
return x
A__ = area_under_curve_estimator(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("******************" )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> List[Any]:
'''simple docstring'''
def function_to_integrate(SCREAMING_SNAKE_CASE_: Tuple ) -> float:
return sqrt(4.0 - x * x )
A__ = area_under_curve_estimator(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 514 |
import math
import tensorflow as tf
from packaging import version
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : List[str] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Optional[Any] =tf.cast(math.pi , x.dtype )
__magic_name__ : int =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase , 3 )) ))
return x * cdf
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Any =tf.convert_to_tensor(lowerCamelCase )
return x * tf.tanh(tf.math.softplus(lowerCamelCase ) )
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[Any] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Union[str, Any] =tf.cast(0.0_4_4_7_1_5 , x.dtype )
__magic_name__ : Tuple =tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[str] =tf.convert_to_tensor(lowerCamelCase )
__magic_name__ : Dict =tf.cast(1.7_0_2 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.clip_by_value(_gelu(lowerCamelCase ) , -10 , 10 )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=-1 ):
__magic_name__ , __magic_name__ : List[Any] =tf.split(lowerCamelCase , 2 , axis=lowerCamelCase )
return a * tf.math.sigmoid(lowerCamelCase )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowerCAmelCase_ ( lowerCamelCase ):
return tf.keras.activations.gelu(lowerCamelCase , approximate=lowerCamelCase )
UpperCAmelCase_ : List[str] = tf.keras.activations.gelu
UpperCAmelCase_ : Dict = approximate_gelu_wrap
else:
UpperCAmelCase_ : Dict = _gelu
UpperCAmelCase_ : str = _gelu_new
UpperCAmelCase_ : Any = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowerCAmelCase_ ( lowerCamelCase ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 21 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
UpperCAmelCase = logging.getLogger(__name__)
UpperCAmelCase = tf.data.AUTOTUNE
def SCREAMING_SNAKE_CASE_ ( ):
snake_case_ : Tuple = argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=lowerCAmelCase_ , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=lowerCAmelCase_ , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=lowerCAmelCase_ , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=lowerCAmelCase_ , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=lowerCAmelCase_ , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=lowerCAmelCase_ , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=lowerCAmelCase_ , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=lowerCAmelCase_ , default=2**1_8 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=lowerCAmelCase_ , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=lowerCAmelCase_ , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=lowerCAmelCase_ , default=1e-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=lowerCAmelCase_ , default=1e-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=lowerCAmelCase_ , default=5_1_2 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=lowerCAmelCase_ , default=0.1_5 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=lowerCAmelCase_ , help="Model ID to upload to on the Hugging Face Hub." )
snake_case_ : List[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Optional[int] ):
try:
if args.tpu_name:
snake_case_ : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
snake_case_ : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(lowerCAmelCase_ )
tf.tpu.experimental.initialize_tpu_system(lowerCAmelCase_ )
return tpu
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Any ):
snake_case_ : List[Any] = 0
for file in file_list:
snake_case_ : str = file.split("/" )[-1]
snake_case_ : Tuple = re.search(R"-\d+-(\d+)\.tfrecord" , lowerCAmelCase_ ).group(1 )
snake_case_ : int = int(lowerCAmelCase_ )
num_samples += sample_count
return num_samples
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: str , lowerCAmelCase_: Optional[int] , lowerCAmelCase_: Optional[int] , lowerCAmelCase_: Any , lowerCAmelCase_: Any , lowerCAmelCase_: int=None ):
snake_case_ : Optional[Any] = count_samples(lowerCAmelCase_ )
snake_case_ : Optional[int] = tf.data.Dataset.from_tensor_slices(lowerCAmelCase_ )
if shuffle:
snake_case_ : Union[str, Any] = dataset.shuffle(len(lowerCAmelCase_ ) )
snake_case_ : List[str] = tf.data.TFRecordDataset(lowerCAmelCase_ , num_parallel_reads=lowerCAmelCase_ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
snake_case_ : Union[str, Any] = dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase_ ) )
snake_case_ : Dict = dataset.map(lowerCAmelCase_ , num_parallel_calls=lowerCAmelCase_ )
if shuffle:
assert shuffle_buffer_size is not None
snake_case_ : Tuple = dataset.shuffle(args.shuffle_buffer_size )
snake_case_ : Optional[int] = dataset.batch(lowerCAmelCase_ , drop_remainder=lowerCAmelCase_ )
snake_case_ : Any = dataset.map(lowerCAmelCase_ , num_parallel_calls=lowerCAmelCase_ )
snake_case_ : int = dataset.prefetch(lowerCAmelCase_ )
return dataset
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: str ):
if not args.no_tpu:
snake_case_ : Optional[int] = initialize_tpu(lowerCAmelCase_ )
snake_case_ : str = tf.distribute.TPUStrategy(lowerCAmelCase_ )
else:
snake_case_ : Optional[Any] = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
snake_case_ : List[str] = AutoTokenizer.from_pretrained(args.tokenizer )
snake_case_ : Tuple = AutoConfig.from_pretrained(args.pretrained_model_config )
snake_case_ : Union[str, Any] = tokenizer.vocab_size
snake_case_ : str = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
snake_case_ : int = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
snake_case_ : Tuple = count_samples(lowerCAmelCase_ )
snake_case_ : Any = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
snake_case_ : Any = steps_per_epoch * args.num_epochs
with strategy.scope():
snake_case_ : Optional[Any] = TFAutoModelForMaskedLM.from_config(lowerCAmelCase_ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
snake_case_ : Dict = create_optimizer(
num_train_steps=lowerCAmelCase_ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCAmelCase_ , metrics=["accuracy"] )
def decode_fn(lowerCAmelCase_: str ):
snake_case_ : Optional[int] = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowerCAmelCase_ , lowerCAmelCase_ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
snake_case_ : Optional[Any] = DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase_ , mlm_probability=args.mlm_probability , mlm=lowerCAmelCase_ , return_tensors="tf" )
def mask_with_collator(lowerCAmelCase_: int ):
# TF really needs an isin() function
snake_case_ : Dict = (
~tf.cast(batch["attention_mask"] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
snake_case_ : Union[str, Any] = data_collator.tf_mask_tokens(
batch["input_ids"] , vocab_size=len(lowerCAmelCase_ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCAmelCase_ , )
return batch
snake_case_ : Optional[Any] = args.per_replica_batch_size * strategy.num_replicas_in_sync
snake_case_ : Dict = prepare_dataset(
lowerCAmelCase_ , decode_fn=lowerCAmelCase_ , mask_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , shuffle=lowerCAmelCase_ , shuffle_buffer_size=args.shuffle_buffer_size , )
snake_case_ : List[str] = prepare_dataset(
lowerCAmelCase_ , decode_fn=lowerCAmelCase_ , mask_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , shuffle=lowerCAmelCase_ , )
snake_case_ : Optional[int] = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCAmelCase_ ) )
model.fit(
lowerCAmelCase_ , validation_data=lowerCAmelCase_ , epochs=args.num_epochs , callbacks=lowerCAmelCase_ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
UpperCAmelCase = parse_args()
main(args)
| 666 |
from collections.abc import Sequence
def lowerCAmelCase_ ( lowerCamelCase = None ):
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
__magic_name__ : str =nums[0]
for i in range(1 , len(lowerCamelCase ) ):
__magic_name__ : Any =nums[i]
__magic_name__ : Dict =max(lowerCamelCase , ans + num , lowerCamelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCAmelCase_ : List[str] = int(input("Enter number of elements : ").strip())
UpperCAmelCase_ : Tuple = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 21 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_lowerCamelCase : List[str] = pd.read_csv(
'''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
'''position_salaries.csv'''
)
_lowerCamelCase : Union[str, Any] = dataset.iloc[:, 1:2].values
_lowerCamelCase : Dict = dataset.iloc[:, 2].values
_lowerCamelCase : Dict = train_test_split(X, y, test_size=0.2, random_state=0)
_lowerCamelCase : Union[str, Any] = PolynomialFeatures(degree=4)
_lowerCamelCase : Any = poly_reg.fit_transform(X)
_lowerCamelCase : Optional[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
def __lowerCamelCase ():
plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color="red" )
plt.plot(UpperCAmelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCAmelCase__ ) ) , color="blue" )
plt.title("Truth or Bluff (Linear Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 403 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__( self :Union[str, Any] ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __A :
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =sorted(set(self.languages ) ) if self.languages else None
__magic_name__ : Optional[int] =len(self.languages ) if self.languages else None
def __call__( self :List[str] ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =set(self.languages )
if self.languages and set(__snake_case ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__magic_name__ : Any =[]
for lang, text in translation_dict.items():
if isinstance(__snake_case , __snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__magic_name__ , __magic_name__ : List[str] =zip(*sorted(__snake_case ) )
return {"language": languages, "translation": translations}
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 21 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
A_ = logging.get_logger(__name__)
# General docstring
A_ = "MobileNetV1Config"
# Base docstring
A_ = "google/mobilenet_v1_1.0_224"
A_ = [1, 1024, 7, 7]
# Image classification docstring
A_ = "google/mobilenet_v1_1.0_224"
A_ = "tabby, tabby cat"
A_ = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = {}
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = model.mobilenet_va
else:
lowerCamelCase_ = model
lowerCamelCase_ = """MobilenetV1/Conv2d_0/"""
lowerCamelCase_ = backbone.conv_stem.convolution.weight
lowerCamelCase_ = backbone.conv_stem.normalization.bias
lowerCamelCase_ = backbone.conv_stem.normalization.weight
lowerCamelCase_ = backbone.conv_stem.normalization.running_mean
lowerCamelCase_ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
lowerCamelCase_ = i + 1
lowerCamelCase_ = i * 2
lowerCamelCase_ = backbone.layer[pt_index]
lowerCamelCase_ = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
lowerCamelCase_ = pointer.convolution.weight
lowerCamelCase_ = pointer.normalization.bias
lowerCamelCase_ = pointer.normalization.weight
lowerCamelCase_ = pointer.normalization.running_mean
lowerCamelCase_ = pointer.normalization.running_var
lowerCamelCase_ = backbone.layer[pt_index + 1]
lowerCamelCase_ = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
lowerCamelCase_ = pointer.convolution.weight
lowerCamelCase_ = pointer.normalization.bias
lowerCamelCase_ = pointer.normalization.weight
lowerCamelCase_ = pointer.normalization.running_mean
lowerCamelCase_ = pointer.normalization.running_var
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
lowerCamelCase_ = model.classifier.weight
lowerCamelCase_ = model.classifier.bias
return tf_to_pt_map
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
lowerCamelCase_ = tf.train.list_variables(lowerCAmelCase__ )
lowerCamelCase_ = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}" )
lowerCamelCase_ = tf.train.load_variable(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = array
# Build TF to PyTorch weights loading map
lowerCamelCase_ = _build_tf_to_pytorch_map(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
for name, pointer in tf_to_pt_map.items():
logger.info(f"Importing {name}" )
if name not in tf_weights:
logger.info(f"{name} not in tf pre-trained weights, skipping" )
continue
lowerCamelCase_ = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
lowerCamelCase_ = np.transpose(lowerCAmelCase__ ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
lowerCamelCase_ = array.squeeze().transpose()
else:
lowerCamelCase_ = np.transpose(lowerCAmelCase__ ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
tf_weights.pop(lowerCAmelCase__ ,lowerCAmelCase__ )
tf_weights.pop(name + '''/RMSProp''' ,lowerCAmelCase__ )
tf_weights.pop(name + '''/RMSProp_1''' ,lowerCAmelCase__ )
tf_weights.pop(name + '''/ExponentialMovingAverage''' ,lowerCAmelCase__ )
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = features.shape[-2:]
lowerCamelCase_ = conv_layer.stride
lowerCamelCase_ = conv_layer.kernel_size
if in_height % stride_height == 0:
lowerCamelCase_ = max(kernel_height - stride_height ,0 )
else:
lowerCamelCase_ = max(kernel_height - (in_height % stride_height) ,0 )
if in_width % stride_width == 0:
lowerCamelCase_ = max(kernel_width - stride_width ,0 )
else:
lowerCamelCase_ = max(kernel_width - (in_width % stride_width) ,0 )
lowerCamelCase_ = pad_along_width // 2
lowerCamelCase_ = pad_along_width - pad_left
lowerCamelCase_ = pad_along_height // 2
lowerCamelCase_ = pad_along_height - pad_top
lowerCamelCase_ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowerCAmelCase__ ,lowerCAmelCase__ ,'''constant''' ,0.0 )
class __lowerCamelCase ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , UpperCAmelCase = 1 , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = True , ):
super().__init__()
lowerCamelCase_ = config
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." )
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." )
lowerCamelCase_ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
lowerCamelCase_ = nn.Convad(
in_channels=__snake_case , out_channels=__snake_case , kernel_size=__snake_case , stride=__snake_case , padding=__snake_case , groups=__snake_case , bias=__snake_case , padding_mode='''zeros''' , )
if use_normalization:
lowerCamelCase_ = nn.BatchNormad(
num_features=__snake_case , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=__snake_case , track_running_stats=__snake_case , )
else:
lowerCamelCase_ = None
if use_activation:
if isinstance(__snake_case , __snake_case ):
lowerCamelCase_ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , __snake_case ):
lowerCamelCase_ = ACTaFN[config.hidden_act]
else:
lowerCamelCase_ = config.hidden_act
else:
lowerCamelCase_ = None
def UpperCAmelCase__ ( self , UpperCAmelCase ):
if self.config.tf_padding:
lowerCamelCase_ = apply_tf_padding(__snake_case , self.convolution )
lowerCamelCase_ = self.convolution(__snake_case )
if self.normalization is not None:
lowerCamelCase_ = self.normalization(__snake_case )
if self.activation is not None:
lowerCamelCase_ = self.activation(__snake_case )
return features
class __lowerCamelCase ( UpperCamelCase__ ):
a__: Optional[int] = MobileNetVaConfig
a__: str = load_tf_weights_in_mobilenet_va
a__: Dict = 'mobilenet_v1'
a__: List[Any] = 'pixel_values'
a__: Dict = False
def UpperCAmelCase__ ( self , UpperCAmelCase ):
if isinstance(__snake_case , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__snake_case , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
A_ = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
A_ = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , UpperCamelCase__ , )
class __lowerCamelCase ( UpperCamelCase__ ):
def __init__( self , UpperCAmelCase , UpperCAmelCase = True ):
super().__init__(__snake_case )
lowerCamelCase_ = config
lowerCamelCase_ = 32
lowerCamelCase_ = max(int(depth * config.depth_multiplier ) , config.min_depth )
lowerCamelCase_ = MobileNetVaConvLayer(
__snake_case , in_channels=config.num_channels , out_channels=__snake_case , kernel_size=3 , stride=2 , )
lowerCamelCase_ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
lowerCamelCase_ = nn.ModuleList()
for i in range(13 ):
lowerCamelCase_ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowerCamelCase_ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__snake_case , in_channels=__snake_case , out_channels=__snake_case , kernel_size=3 , stride=strides[i] , groups=__snake_case , ) )
self.layer.append(
MobileNetVaConvLayer(
__snake_case , in_channels=__snake_case , out_channels=__snake_case , kernel_size=1 , ) )
lowerCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCAmelCase__ ( self , UpperCAmelCase ):
raise NotImplementedError
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase__ ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ):
lowerCamelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
lowerCamelCase_ = self.conv_stem(__snake_case )
lowerCamelCase_ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
lowerCamelCase_ = layer_module(__snake_case )
if output_hidden_states:
lowerCamelCase_ = all_hidden_states + (hidden_states,)
lowerCamelCase_ = hidden_states
if self.pooler is not None:
lowerCamelCase_ = torch.flatten(self.pooler(__snake_case ) , start_dim=1 )
else:
lowerCamelCase_ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=__snake_case , )
@add_start_docstrings(
'\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCamelCase__ , )
class __lowerCamelCase ( UpperCamelCase__ ):
def __init__( self , UpperCAmelCase ):
super().__init__(__snake_case )
lowerCamelCase_ = config.num_labels
lowerCamelCase_ = MobileNetVaModel(__snake_case )
lowerCamelCase_ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowerCamelCase_ = nn.Dropout(config.classifier_dropout_prob , inplace=__snake_case )
lowerCamelCase_ = nn.Linear(__snake_case , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase__ ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ):
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ = self.mobilenet_va(__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case )
lowerCamelCase_ = outputs.pooler_output if return_dict else outputs[1]
lowerCamelCase_ = self.classifier(self.dropout(__snake_case ) )
lowerCamelCase_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCamelCase_ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCamelCase_ = """single_label_classification"""
else:
lowerCamelCase_ = """multi_label_classification"""
if self.config.problem_type == "regression":
lowerCamelCase_ = MSELoss()
if self.num_labels == 1:
lowerCamelCase_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCamelCase_ = loss_fct(__snake_case , __snake_case )
elif self.config.problem_type == "single_label_classification":
lowerCamelCase_ = CrossEntropyLoss()
lowerCamelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCamelCase_ = BCEWithLogitsLoss()
lowerCamelCase_ = loss_fct(__snake_case , __snake_case )
if not return_dict:
lowerCamelCase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states , )
| 29 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase_ : Dict = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase_ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase_ : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def A__ ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def A__ ( self :Tuple , __snake_case :str , __snake_case :Tuple , __snake_case :List[str]=None ):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__snake_case , __snake_case , sample_weight=__snake_case ) ),
}
| 21 | 0 |
"""simple docstring"""
import heapq
def UpperCAmelCase ( a__ ):
'''simple docstring'''
lowerCAmelCase :list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(a__ , [-1 * len(a__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowerCAmelCase :Tuple = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowerCAmelCase :Tuple = heapq.heappop(a__ )[1][0]
chosen_vertices.add(a__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowerCAmelCase :Tuple = elem[1][1].index(a__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(a__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""") | 553 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ : Optional[int] =OmegaConf.load(lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(lowerCamelCase ) ) )
return config
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
if conf_path is None:
__magic_name__ : List[str] ="""./model_checkpoints/vqgan_only.yaml"""
__magic_name__ : Dict =load_config(lowerCamelCase , display=lowerCamelCase )
__magic_name__ : Tuple =VQModel(**config.model.params )
if ckpt_path is None:
__magic_name__ : Optional[Any] ="""./model_checkpoints/vqgan_only.pt"""
__magic_name__ : Tuple =torch.load(lowerCamelCase , map_location=lowerCamelCase )
if ".ckpt" in ckpt_path:
__magic_name__ : Any =sd["""state_dict"""]
model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
model.to(lowerCamelCase )
del sd
return model
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] =model.encode(lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
__magic_name__ : List[Any] =model.decode(lowerCamelCase )
return xrec
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ):
__magic_name__ , __magic_name__ : Optional[int] =string.rsplit(""".""" , 1 )
if reload:
__magic_name__ : Optional[int] =importlib.import_module(lowerCamelCase )
importlib.reload(lowerCamelCase )
return getattr(importlib.import_module(lowerCamelCase , package=lowerCamelCase ) , cls )
def lowerCAmelCase_ ( lowerCamelCase ):
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=True ):
__magic_name__ : str =instantiate_from_config(lowerCamelCase )
if sd is not None:
model.load_state_dict(lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# load the specified checkpoint
if ckpt:
__magic_name__ : str =torch.load(lowerCamelCase , map_location="""cpu""" )
__magic_name__ : Any =pl_sd["""global_step"""]
print(F"loaded model from global step {global_step}." )
else:
__magic_name__ : List[Any] ={"""state_dict""": None}
__magic_name__ : Optional[Any] =None
__magic_name__ : Tuple =load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=lowerCamelCase , eval_mode=lowerCamelCase )["""model"""]
return model, global_step
| 21 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowercase ( UpperCamelCase__):
"""simple docstring"""
_A : str = """upernet"""
def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ):
super().__init__(**__snake_case )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(__snake_case , __snake_case ):
snake_case_ : int = backbone_config.get("""model_type""" )
snake_case_ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : List[Any] = config_class.from_dict(__snake_case )
snake_case_ : Dict = backbone_config
snake_case_ : Optional[Any] = hidden_size
snake_case_ : List[str] = initializer_range
snake_case_ : Tuple = pool_scales
snake_case_ : Optional[Any] = use_auxiliary_head
snake_case_ : List[str] = auxiliary_loss_weight
snake_case_ : int = auxiliary_in_channels
snake_case_ : Optional[int] = auxiliary_channels
snake_case_ : Optional[int] = auxiliary_num_convs
snake_case_ : int = auxiliary_concat_input
snake_case_ : Optional[Any] = loss_ignore_index
def __UpperCamelCase (self ):
snake_case_ : List[str] = copy.deepcopy(self.__dict__ )
snake_case_ : Dict = self.backbone_config.to_dict()
snake_case_ : Optional[int] = self.__class__.model_type
return output
| 480 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
def A__ ( self :Tuple ):
'''simple docstring'''
debug_launcher(test_script.main )
def A__ ( self :Dict ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 21 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a: Any = logging.get_logger(__name__)
__a: List[Any] = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
'''simple docstring'''
_lowerCamelCase = '''encodec'''
def __init__( self : str , lowerCamelCase : Any=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCamelCase : Dict=2_4000 , lowerCamelCase : Optional[int]=1 , lowerCamelCase : str=False , lowerCamelCase : Optional[int]=None , lowerCamelCase : int=None , lowerCamelCase : Any=128 , lowerCamelCase : Dict=32 , lowerCamelCase : Optional[int]=1 , lowerCamelCase : Union[str, Any]=[8, 5, 4, 2] , lowerCamelCase : Optional[int]="weight_norm" , lowerCamelCase : Dict=7 , lowerCamelCase : str=7 , lowerCamelCase : int=3 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Any="reflect" , lowerCamelCase : List[Any]=2 , lowerCamelCase : Any=2 , lowerCamelCase : Tuple=1.0 , lowerCamelCase : int=1024 , lowerCamelCase : Optional[int]=None , lowerCamelCase : str=True , **lowerCamelCase : Optional[int] , ) -> Any:
"""simple docstring"""
_UpperCAmelCase = target_bandwidths
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = audio_channels
_UpperCAmelCase = normalize
_UpperCAmelCase = chunk_length_s
_UpperCAmelCase = overlap
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_filters
_UpperCAmelCase = num_residual_layers
_UpperCAmelCase = upsampling_ratios
_UpperCAmelCase = norm_type
_UpperCAmelCase = kernel_size
_UpperCAmelCase = last_kernel_size
_UpperCAmelCase = residual_kernel_size
_UpperCAmelCase = dilation_growth_rate
_UpperCAmelCase = use_causal_conv
_UpperCAmelCase = pad_mode
_UpperCAmelCase = compress
_UpperCAmelCase = num_lstm_layers
_UpperCAmelCase = trim_right_ratio
_UpperCAmelCase = codebook_size
_UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
_UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**__snake_case )
@property
def lowerCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowerCamelCase ( self : int ) -> str:
"""simple docstring"""
_UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) ) | 108 |
UpperCAmelCase_ : Tuple = 0 # The first color of the flag.
UpperCAmelCase_ : Any = 1 # The second color of the flag.
UpperCAmelCase_ : str = 2 # The third color of the flag.
UpperCAmelCase_ : Tuple = (red, white, blue)
def lowerCAmelCase_ ( lowerCamelCase ):
if not sequence:
return []
if len(lowerCamelCase ) == 1:
return list(lowerCamelCase )
__magic_name__ : int =0
__magic_name__ : str =len(lowerCamelCase ) - 1
__magic_name__ : Optional[Any] =0
while mid <= high:
if sequence[mid] == colors[0]:
__magic_name__ , __magic_name__ : Tuple =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
__magic_name__ , __magic_name__ : Optional[Any] =sequence[high], sequence[mid]
high -= 1
else:
__magic_name__ : Optional[int] =F"The elements inside the sequence must contains only {colors} values"
raise ValueError(lowerCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[Any] = input("Enter numbers separated by commas:\n").strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 21 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.