| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87 to 55.2k chars) | int64 (0 to 349) | string (135 to 49.1k chars) | int64 (0 to 349) | int64 (0 to 1) |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    """Return the squared euclidean norm of a vector, e.g. norm_squared(np.asarray([1, 2])) == 5."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier for binary classification with a linear or RBF kernel."""

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            raise ValueError(f"Unknown kernel: {kernel}")

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel: plain dot product."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            # Opposite of the dual objective, so that scipy's minimize maximizes the dual.
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Classify an observation as +1 or -1."""
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
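# Usage sketch for the classifier above (illustrative, not part of the original file).
# The four points are trivially separable, so a linear kernel with a finite
# regularization bound should classify held-out points correctly:
def _svc_demo() -> None:
    xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
    ys = np.asarray([1, 1, -1, -1])
    svc = SVC(kernel="linear", regularization=10)
    svc.fit(observations=xs, classes=ys)
    assert svc.predict(np.asarray([0.0, 1.0])) == 1
    assert svc.predict(np.asarray([1.0, 2.0])) == -1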
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
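# A small worked example of the fairseq/SPM alignment documented in the comments
# above. Piece ids for real tokens are model-dependent, so the concrete numbers
# below are assumptions taken from that alignment table:
def _fairseq_offset_demo() -> None:
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    fairseq_offset = 1  # every SPM piece id is shifted up by one
    spm_id_for_comma = 3  # "," sits at position 3 in the SPM vocab
    assert spm_id_for_comma + fairseq_offset == 4  # and at position 4 on the fairseq side
    assert fairseq_tokens_to_ids["<unk>"] == 3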
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class A_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :int ):
"""simple docstring"""
if dst_width < 0 or dst_height < 0:
raise ValueError('Destination width/height should be > 0' )
lowerCamelCase__ : Dict =img
lowerCamelCase__ : List[Any] =img.shape[1]
lowerCamelCase__ : Optional[Any] =img.shape[0]
lowerCamelCase__ : List[Any] =dst_width
lowerCamelCase__ : Any =dst_height
lowerCamelCase__ : Union[str, Any] =self.src_w / self.dst_w
lowerCamelCase__ : str =self.src_h / self.dst_h
lowerCamelCase__ : Any =(
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowerCamelCase__ : Optional[int] =self.img[self.get_y(_snake_case )][self.get_x(_snake_case )]
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :int ):
"""simple docstring"""
return int(self.ratio_x * x )
def UpperCAmelCase__ ( self :Optional[int] , lowerCamelCase_ :int ):
"""simple docstring"""
return int(self.ratio_y * y )
if __name__ == "__main__":
lowerCAmelCase , lowerCAmelCase = 8_00, 6_00
lowerCAmelCase = imread("""image_data/lena.jpg""", 1)
lowerCAmelCase = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows() | 126 |
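# The double loop in process() runs dst_h * dst_w Python iterations. A vectorized
# sketch of the same nearest-neighbour mapping using numpy fancy indexing (same
# arithmetic as get_x/get_y above; assumed to produce identical output):
def _resize_vectorized(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)  # nearest source row per output row
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)  # nearest source column per output column
    return img[ys[:, None], xs[None, :]]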
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes: int, name: str, config, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Copy weights over positionally: both state dicts enumerate layers in the same order.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
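# Hypothetical invocation of the script above (the script filename is an
# assumption; the flags come from the argparse setup):
#
#     python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ --no-push_to_hub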
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of pi from math is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
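# Quick demo of the estimators above (function names follow the deobfuscated
# code; results are stochastic and printed rather than returned):
def _monte_carlo_demo() -> None:
    pi_estimator(100_000)                         # estimate of pi from random dart throws
    area_under_line_estimator_check(100_000)      # area under y=x on [0, 1] is exactly 0.5
    pi_estimator_using_area_under_curve(100_000)  # integrates sqrt(4 - x^2) on [0, 2], which equals pi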
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """A feature holding a fixed set of translations, one per language."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """A feature holding translations for a variable set of languages, possibly several per language."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) '
                f'are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
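# A sketch of what encode_example() produces: one-to-many translations are
# flattened into parallel tuples, sorted by (language, text):
def _translation_demo() -> None:
    feature = TranslationVariableLanguages(languages=["en", "fr"])
    encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
    assert encoded == {
        "language": ("en", "fr", "fr"),
        "translation": ("the cat", "la chatte", "le chat"),
    }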
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
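# A minimal sketch of how the `find_executable_batch_size` decorator behaves in
# isolation: to my understanding it halves the starting batch size and retries
# whenever the wrapped function raises an out-of-memory error. The simulated OOM
# threshold below is an assumption for illustration only:
def _oom_retry_demo() -> None:
    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        if batch_size > 32:  # pretend anything above 32 runs out of memory
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    assert train() == 32  # tried 128 -> 64 -> 32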
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
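# Layout of the special tokens produced by the two methods above (a sketch; the
# literal tokens come from the defaults in __init__):
#     single sequence:   [CLS] A [SEP]          -> token_type_ids are all 0
#     pair of sequences: [CLS] A [SEP] B [SEP]  -> 0s over the first segment, 1s over the second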
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """(HF key, original key) pairs for the patch-embedding weights of stage `idx`."""
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    """(HF key, original key) pairs for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """(HF key, original key) pair for the class token of stage `idx`."""
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final():
    """(HF key, original key) pairs for the final norm and classifier head."""
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size  # restored target of the obfuscated assignment

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    # Collect the (HF key, original key) renaming pairs for every stage and block.
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
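# Hypothetical invocation of the converter above (the script filename and the
# local paths are assumptions; the flags mirror the argparse setup):
#
#     python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#         --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#         --pytorch_dump_folder_path ./cvt-w24-384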
"""Shortest Job Remaining First (preemptive SJF / SRTF) scheduling."""
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process using shortest-remaining-time-first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes

    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
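# Hand-traced example of the SRTF logic above: with arrivals [0, 1, 2] and
# bursts [3, 1, 2], process 2 preempts process 1 at t=1; the finish order is
# P2 (t=2), P1 (t=4), P3 (t=6), which gives the values asserted below:
def _srtf_demo() -> None:
    waiting = calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)
    assert waiting == [1, 0, 2]
    turnaround = calculate_turnaroundtime([3, 1, 2], 3, waiting)
    assert turnaround == [4, 1, 4]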
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not number >= 1:
raise ValueError(
'''starting number must be
and integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
lowercase__ : Tuple = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __lowercase ( A_ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = PriorTransformer
__lowerCAmelCase = "hidden_states"
@property
def _lowerCamelCase ( self ):
__a : Dict = 4
__a : List[Any] = 8
__a : Any = 7
__a : int = floats_tensor((batch_size, embedding_dim) ).to(_snake_case )
__a : Any = floats_tensor((batch_size, embedding_dim) ).to(_snake_case )
__a : int = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_snake_case )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCamelCase ( self , _UpperCAmelCase=0 ):
torch.manual_seed(_snake_case )
__a : Optional[Any] = 4
__a : Optional[int] = 8
__a : Any = 7
__a : str = torch.randn((batch_size, embedding_dim) ).to(_snake_case )
__a : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_snake_case )
__a : Dict = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_snake_case )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _lowerCamelCase ( self ):
return (4, 8)
@property
def _lowerCamelCase ( self ):
return (4, 8)
def _lowerCamelCase ( self ):
__a : Tuple = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
__a : Any = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
__a : Tuple = PriorTransformer.from_pretrained(
'''hf-internal-testing/prior-dummy''' , output_loading_info=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_snake_case )
__a : Optional[Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _lowerCamelCase ( self ):
__a : Any = self.prepare_init_args_and_inputs_for_common()
__a : List[str] = self.model_class(**_snake_case )
__a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : int = ['''hidden_states''', '''timestep''']
self.assertListEqual(arg_names[:2] , _snake_case )
def _lowerCamelCase ( self ):
__a : Any = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
__a : Dict = model.to(_snake_case )
if hasattr(_snake_case , '''set_default_attn_processor''' ):
model.set_default_attn_processor()
__a : Dict = self.get_dummy_seed_input()
with torch.no_grad():
__a : Optional[int] = model(**_snake_case )[0]
__a : str = output[0, :5].flatten().cpu()
print(_snake_case )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__a : Optional[Any] = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1e-2 ) )
@slow
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self , _UpperCAmelCase=1 , _UpperCAmelCase=768 , _UpperCAmelCase=77 , _UpperCAmelCase=0 ):
torch.manual_seed(_snake_case )
__a : Tuple = batch_size
__a : List[str] = embedding_dim
__a : Tuple = num_embeddings
__a : int = torch.randn((batch_size, embedding_dim) ).to(_snake_case )
__a : str = torch.randn((batch_size, embedding_dim) ).to(_snake_case )
__a : List[str] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_snake_case )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[Any] = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
model.to(_snake_case )
__a : List[str] = self.get_dummy_seed_input(seed=_snake_case )
with torch.no_grad():
__a : List[str] = model(**_snake_case )[0]
assert list(sample.shape ) == [1, 768]
__a : List[str] = sample[0, :8].flatten().cpu()
print(_snake_case )
__a : Optional[Any] = torch.tensor(_snake_case )
assert torch_all_close(_snake_case , _snake_case , atol=1e-3 ) | 160 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Optional[Any] = embeddings_size
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : str = depths
lowercase__ : Tuple = is_training
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Optional[Any] = len(_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = TFResNetModel(config=_snake_case )
lowercase__ : List[str] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case )
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ):
lowercase__ : str = model_class(_snake_case )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
| 16 | 0 |
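# Editor's note: both test files above assert that a model's forward/call
# signature begins with specific argument names. A standalone sketch of that
# introspection pattern; ToyModel is illustrative, not a transformers class.
import inspect

class ToyModel:
    def call(self, pixel_values, training=False):
        return pixel_values

arg_names = [*inspect.signature(ToyModel().call).parameters.keys()]
# signature.parameters is an ordered mapping, so positional order is preserved
assert arg_names[:1] == ["pixel_values"]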
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : Tuple ):
super().tearDown()
gc.collect()
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Dict = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''', from_pt=_snake_case, dtype=jnp.bfloataa )
UpperCAmelCase : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', controlnet=_snake_case, from_pt=_snake_case, dtype=jnp.bfloataa )
UpperCAmelCase : Union[str, Any] = controlnet_params
UpperCAmelCase : Optional[Any] = '''bird'''
UpperCAmelCase : str = jax.device_count()
UpperCAmelCase : Union[str, Any] = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
UpperCAmelCase : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
UpperCAmelCase : str = jax.random.PRNGKey(0 )
UpperCAmelCase : Dict = jax.random.split(_snake_case, jax.device_count() )
UpperCAmelCase : Any = replicate(_snake_case )
UpperCAmelCase : List[str] = shard(_snake_case )
UpperCAmelCase : Dict = shard(_snake_case )
UpperCAmelCase : Optional[Any] = pipe(
prompt_ids=_snake_case, image=_snake_case, params=_snake_case, prng_seed=_snake_case, num_inference_steps=5_0, jit=_snake_case, ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
UpperCAmelCase : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCAmelCase : Optional[int] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCAmelCase : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase : Any = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : int = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''', from_pt=_snake_case, dtype=jnp.bfloataa )
UpperCAmelCase : int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', controlnet=_snake_case, from_pt=_snake_case, dtype=jnp.bfloataa )
UpperCAmelCase : Optional[Any] = controlnet_params
UpperCAmelCase : Optional[Any] = '''Chef in the kitchen'''
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : List[str] = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCAmelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
UpperCAmelCase : Tuple = pipe.prepare_image_inputs([pose_image] * num_samples )
UpperCAmelCase : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase : Optional[int] = jax.random.split(_snake_case, jax.device_count() )
UpperCAmelCase : Any = replicate(_snake_case )
UpperCAmelCase : Optional[int] = shard(_snake_case )
UpperCAmelCase : List[str] = shard(_snake_case )
UpperCAmelCase : str = pipe(
prompt_ids=_snake_case, image=_snake_case, params=_snake_case, prng_seed=_snake_case, num_inference_steps=5_0, jit=_snake_case, ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
UpperCAmelCase : Optional[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCAmelCase : Dict = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCAmelCase : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase : Tuple = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 336 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
if "model" in orig_key:
lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
lowercase__ : str = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
lowercase__ : Optional[Any] = '''yoso.''' + orig_key
return orig_key
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowercase__ : Tuple = val
lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias''']
lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict''']
lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase )
lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase )
lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase )
print(model.load_state_dict(__lowerCamelCase ) )
model.eval()
model.save_pretrained(__lowerCamelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 16 | 0 |
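# Editor's note: the conversion script above renames checkpoint keys through a
# chain of substring replacements. A minimal sketch of that remapping pattern
# using an ordered rule table; the two rules shown are illustrative, not the
# full YOSO mapping.
from collections import OrderedDict

RENAME_RULES = [
    ("norm1", "attention.output.LayerNorm"),
    ("mha", "attention"),
]

def rename_key(key: str) -> str:
    for old, new in RENAME_RULES:
        if old in key:
            key = key.replace(old, new)
    return key

def convert_state_dict(state_dict):
    # rules apply in order, so more specific patterns must come first
    return OrderedDict((rename_key(k), v) for k, v in state_dict.items())

assert rename_key("layer.0.norm1.weight") == "layer.0.attention.output.LayerNorm.weight"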
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
__A = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
__A = "</w>"
__A = "@@ "
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
__lowerCAmelCase: Any = set()
__lowerCAmelCase: Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCAmelCase: Any = char
return pairs
# Speech2Text2 has no max input length
__A = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class snake_case ( A_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str="<s>" , UpperCamelCase__ : List[Any]="<pad>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : Tuple , )-> int:
'''simple docstring'''
super().__init__(
unk_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , pad_token=_snake_case , do_lower_case=_snake_case , **_snake_case , )
__lowerCAmelCase: Optional[int] = do_lower_case
with open(_snake_case , encoding="utf-8") as vocab_handle:
__lowerCAmelCase: List[str] = json.load(_snake_case)
__lowerCAmelCase: Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
__lowerCAmelCase: Union[str, Any] = None
__lowerCAmelCase: int = None
else:
with open(_snake_case , encoding="utf-8") as merges_handle:
__lowerCAmelCase: Tuple = merges_handle.read().split("\n")[:-1]
__lowerCAmelCase: Optional[int] = [tuple(merge.split()[:2]) for merge in merges]
__lowerCAmelCase: Tuple = dict(zip(_snake_case , range(len(_snake_case))))
__lowerCAmelCase: Any = {}
@property
def lowercase_ ( self : Optional[int])-> int:
'''simple docstring'''
return len(self.decoder)
def lowercase_ ( self : Dict)-> Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
__lowerCAmelCase: List[str] = get_pairs(_snake_case)
if not pairs:
return token
while True:
__lowerCAmelCase: List[str] = min(_snake_case , key=lambda UpperCamelCase__: self.bpe_ranks.get(UpperCamelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase: Tuple = bigram
__lowerCAmelCase: int = []
__lowerCAmelCase: int = 0
while i < len(_snake_case):
try:
__lowerCAmelCase: Any = word.index(_snake_case , _snake_case)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
__lowerCAmelCase: Tuple = j
if word[i] == first and i < len(_snake_case) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
__lowerCAmelCase: List[Any] = tuple(_snake_case)
__lowerCAmelCase: Dict = new_word
if len(_snake_case) == 1:
break
else:
__lowerCAmelCase: List[Any] = get_pairs(_snake_case)
__lowerCAmelCase: Optional[Any] = ''' '''.join(_snake_case)
if word == "\n " + BPE_TOKEN_MERGES:
__lowerCAmelCase: Union[str, Any] = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(_snake_case):
__lowerCAmelCase: Optional[int] = word.replace(_snake_case , "")
__lowerCAmelCase: Union[str, Any] = word.replace(" " , _snake_case)
__lowerCAmelCase: Optional[Any] = word
return word
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Dict)-> Optional[int]:
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
__lowerCAmelCase: Tuple = text.lower()
__lowerCAmelCase: Optional[Any] = text.split()
__lowerCAmelCase: Dict = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_snake_case).split(" ")))
return split_tokens
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : str)-> int:
'''simple docstring'''
return self.encoder.get(_snake_case , self.encoder.get(self.unk_token))
def lowercase_ ( self : Dict , UpperCamelCase__ : int)-> str:
'''simple docstring'''
__lowerCAmelCase: List[Any] = self.decoder.get(_snake_case , self.unk_token)
return result
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[str])-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = ''' '''.join(_snake_case)
# make sure @@ tokens are concatenated
__lowerCAmelCase: Tuple = ''''''.join(string.split(_snake_case))
return string
def lowercase_ ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None)-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_snake_case):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCAmelCase: str = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
__lowerCAmelCase: int = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(_snake_case , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_snake_case , ensure_ascii=_snake_case) + "\n")
__lowerCAmelCase: List[str] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_snake_case , "w" , encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__: UpperCamelCase__[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
__lowerCAmelCase: Tuple = token_index
writer.write(" ".join(_snake_case) + "\n")
index += 1
return (vocab_file, merges_file)
| 217 |
"""simple docstring"""
import os
def __UpperCAmelCase ( ) -> int:
with open(os.path.dirname(__file__ ) + '''/p022_names.txt''' ) as file:
lowercase__ : List[Any] = str(file.readlines()[0] )
lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase__ : int = 0
lowercase__ : Optional[Any] = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowercase__ : List[str] = 0
return total_score
if __name__ == "__main__":
print(solution())
| 16 | 0 |
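# Editor's note: a compact, self-contained sketch of the byte-pair-merge loop
# used by the tokenizer above: repeatedly merge the adjacent symbol pair with
# the best (lowest) rank until no ranked pair remains. The toy rank table is
# illustrative.
def bpe(word, ranks):
    word = tuple(word)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no mergeable pair left
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word

assert bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) == ("low", "e", "r")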
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__lowercase : int = logging.get_logger(__name__)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None ):
return field(default_factory=lambda: default , metadata=__lowerCamelCase )
@dataclass
class __UpperCamelCase :
A_ = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
A_ = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
A_ = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
A_ = field(
default=A_ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
A_ = field(
default=A_ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
A_ = field(
default=A_ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
A_ = field(default=A_ , metadata={"help": "Use FP16 to accelerate inference."} )
A_ = field(default=A_ , metadata={"help": "Benchmark training of model"} )
A_ = field(default=A_ , metadata={"help": "Verbose memory tracing"} )
A_ = field(
default=A_ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
A_ = field(
default=A_ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
A_ = field(default=A_ , metadata={"help": "Trace memory line by line"} )
A_ = field(default=A_ , metadata={"help": "Save result to a CSV file"} )
A_ = field(default=A_ , metadata={"help": "Save all print statements in a log file"} )
A_ = field(default=A_ , metadata={"help": "Whether to print environment information"} )
A_ = field(
default=A_ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
A_ = field(
default=f"""inference_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv."} , )
A_ = field(
default=f"""inference_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
A_ = field(
default=f"""train_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
A_ = field(
default=f"""train_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
A_ = field(
default=f"""env_info_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving environment information."} , )
A_ = field(
default=f"""log_{round(time() )}.csv""" , metadata={"help": "Log filename used if print statements are saved in log."} , )
A_ = field(default=3 , metadata={"help": "Times an experiment will be run."} )
A_ = field(
default=A_ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
' are deprecated in general and it is advised to use external Benchmarking libraries '
' to benchmark Transformer models.' , _snake_case , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
return self.models
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.' )
return False
else:
return True
| 27 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
requires_backends(self ,'''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = {}
if "candidate_labels" in kwargs:
lowercase__ : Any = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowercase__ : Optional[Any] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = load_image(_snake_case )
lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework )
lowercase__ : str = candidate_labels
lowercase__ : Dict = [hypothesis_template.format(_snake_case ) for x in candidate_labels]
lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case )
lowercase__ : Optional[int] = [text_inputs]
return inputs
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' )
lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] ,_snake_case ):
lowercase__ : List[str] = text_inputs[0]
else:
# Batching case.
lowercase__ : int = text_inputs[0][0]
lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case )
lowercase__ : Union[str, Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Dict = model_outputs.pop('''candidate_labels''' )
lowercase__ : Optional[Any] = model_outputs['''logits'''][0]
if self.framework == "pt":
lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
lowercase__ : Tuple = probs.tolist()
if not isinstance(_snake_case ,_snake_case ):
lowercase__ : Any = [scores]
elif self.framework == "tf":
lowercase__ : List[str] = stable_softmax(_snake_case ,axis=-1 )
lowercase__ : Optional[Any] = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
lowercase__ : Union[str, Any] = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_snake_case ,_snake_case ) ,key=lambda _snake_case : -_snake_case[0] )
]
return result
| 16 | 0 |
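# Editor's note: the pipeline above turns per-label logits into a ranked list
# of {"score", "label"} dicts. A framework-free sketch of that post-processing
# step; the logits and labels are illustrative.
import math

def postprocess(logits, labels):
    # numerically stable softmax over the candidate-label logits
    m = max(logits)
    exps = [math.exp(l - m) for l in logits]
    total = sum(exps)
    scores = [e / total for e in exps]
    return [
        {"score": s, "label": lab}
        for s, lab in sorted(zip(scores, labels), key=lambda pair: -pair[0])
    ]

out = postprocess([2.0, 0.5, 1.0], ["cat", "plane", "dog"])
assert [d["label"] for d in out] == ["cat", "dog", "plane"]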
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class UpperCAmelCase :
def __init__( self : Dict , __snake_case : Dict , __snake_case : str=None , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : Dict="resnet50" , __snake_case : Dict=3 , __snake_case : Union[str, Any]=32 , __snake_case : int=3 , __snake_case : List[Any]=True , __snake_case : Optional[int]=True , ) -> Any:
_lowerCAmelCase = parent
_lowerCAmelCase = out_indices if out_indices is not None else [4]
_lowerCAmelCase = stage_names
_lowerCAmelCase = out_features
_lowerCAmelCase = backbone
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = use_pretrained_backbone
_lowerCAmelCase = is_training
def lowercase__ ( self : int ) -> Dict:
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = self.get_config()
return config, pixel_values
def lowercase__ ( self : List[str] ) -> str:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase__ ( self : str , __snake_case : Any , __snake_case : List[str] ) -> Any:
_lowerCAmelCase = TimmBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(
result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class UpperCAmelCase ( A_ , A_ , A_ , unittest.TestCase ):
_lowercase: Optional[Any] = (TimmBackbone,) if is_torch_available() else ()
_lowercase: List[Any] = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_lowercase: int = False
_lowercase: List[str] = False
_lowercase: Tuple = False
_lowercase: int = False
def lowercase__ ( self : Any ) -> int:
_lowerCAmelCase = TimmBackboneModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
_lowerCAmelCase = '''resnet18'''
_lowerCAmelCase = '''microsoft/resnet-18'''
_lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case , use_timm_backbone=_snake_case )
_lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
_lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case , use_timm_backbone=_snake_case , out_indices=[1, 2, 3] )
_lowerCAmelCase = AutoBackbone.from_pretrained(_snake_case , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn\'t support feed forward chunking""" )
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip("""TimmBackbone doesn\'t have num_hidden_layers attribute""" )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowercase__ ( self : str ) -> List[Any]:
pass
@unittest.skip("""TimmBackbone models doesn\'t have inputs_embeds""" )
def lowercase__ ( self : int ) -> Any:
pass
@unittest.skip("""TimmBackbone models doesn\'t have inputs_embeds""" )
def lowercase__ ( self : str ) -> List[Any]:
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowercase__ ( self : str ) -> int:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowercase__ ( self : Tuple ) -> Tuple:
pass
@unittest.skip("""model weights aren\'t tied in TimmBackbone.""" )
def lowercase__ ( self : Any ) -> int:
pass
@unittest.skip("""model weights aren\'t tied in TimmBackbone.""" )
def lowercase__ ( self : Optional[Any] ) -> Any:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowercase__ ( self : Union[str, Any] ) -> Any:
pass
@unittest.skip("""TimmBackbone doesn\'t have hidden size info in its configuration.""" )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip("""TimmBackbone doesn\'t support output_attentions.""" )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowercase__ ( self : int ) -> Optional[int]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : List[Any] ) -> int:
pass
def lowercase__ ( self : List[str] ) -> Optional[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_snake_case )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case )
def lowercase__ ( self : Any ) -> Optional[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
_lowerCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowerCAmelCase = self.all_model_classes[0]
_lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
_lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case )
_lowerCAmelCase = model(**_snake_case )
_lowerCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowerCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_snake_case )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
_lowerCAmelCase = model(**_snake_case )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowerCAmelCase = copy.deepcopy(_snake_case )
_lowerCAmelCase = None
_lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
_lowerCAmelCase = model(**_snake_case )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
_lowerCAmelCase = copy.deepcopy(_snake_case )
_lowerCAmelCase = False
_lowerCAmelCase = model_class(_snake_case )
model.to(_snake_case )
model.eval()
_lowerCAmelCase = model(**_snake_case )
| 70 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
lowercase__ : List[str] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCamelCase ):
# looping through rows of graph array
for i in range(__lowerCamelCase ):
# looping through columns of graph array
for j in range(__lowerCamelCase ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowercase__ : str = dist[i][k] + dist[k][j]
_print_dist(__lowerCamelCase , __lowerCamelCase )
return dist, v
if __name__ == "__main__":
lowerCAmelCase_ = int(input('Enter number of vertices: '))
lowerCAmelCase_ = int(input('Enter number of edges: '))
lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase_ = int(input('Enter source:'))
lowerCAmelCase_ = int(input('Enter destination:'))
lowerCAmelCase_ = float(input('Enter weight:'))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 16 | 0 |
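# Editor's note: a de-obfuscated sketch of the Floyd-Warshall relaxation in
# the row above: for each intermediate vertex k, relax every (i, j) pair
# through k. Unreachable pairs stay at infinity.
INF = float("inf")

def floyd_warshall(graph):
    n = len(graph)
    dist = [row[:] for row in graph]  # copy so the input is untouched
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist

g = [[0, 2, INF], [INF, 0, 2], [1, INF, 0]]
assert floyd_warshall(g)[0][2] == 4  # shortest path 0 -> 1 -> 2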
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class __UpperCamelCase ( A_ ):
"""simple docstring"""
lowerCAmelCase_ = "blip_2_vision_model"
def __init__( self : Optional[Any] , _A : Union[str, Any]=1408 , _A : List[str]=6144 , _A : Optional[Any]=39 , _A : Dict=16 , _A : int=224 , _A : str=14 , _A : Tuple="gelu" , _A : Optional[int]=0.0_00_01 , _A : Any=0.0 , _A : List[str]=1e-10 , _A : Optional[int]=True , **_A : List[str] , ):
"""simple docstring"""
super().__init__(**_snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : Tuple = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Any = num_attention_heads
__SCREAMING_SNAKE_CASE : str = patch_size
__SCREAMING_SNAKE_CASE : List[str] = image_size
__SCREAMING_SNAKE_CASE : List[str] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
__SCREAMING_SNAKE_CASE : Any = layer_norm_eps
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE : str = qkv_bias
@classmethod
def UpperCAmelCase__ ( cls : List[str] , _A : Union[str, os.PathLike] , **_A : Optional[int] ):
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case )
__SCREAMING_SNAKE_CASE : Dict = cls.get_config_dict(_snake_case , **_snake_case )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
__SCREAMING_SNAKE_CASE : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_snake_case , **_snake_case )
class __UpperCamelCase ( A_ ):
"""simple docstring"""
lowerCAmelCase_ = "blip_2_qformer"
def __init__( self : List[str] , _A : int=3_0522 , _A : str=768 , _A : List[str]=12 , _A : List[str]=12 , _A : Union[str, Any]=3072 , _A : Optional[int]="gelu" , _A : List[Any]=0.1 , _A : int=0.1 , _A : Optional[Any]=512 , _A : Optional[int]=0.02 , _A : List[Any]=1e-12 , _A : Union[str, Any]=0 , _A : Tuple="absolute" , _A : Dict=2 , _A : Union[str, Any]=1408 , **_A : int , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : str = num_attention_heads
__SCREAMING_SNAKE_CASE : str = hidden_act
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
__SCREAMING_SNAKE_CASE : str = position_embedding_type
__SCREAMING_SNAKE_CASE : List[str] = cross_attention_frequency
__SCREAMING_SNAKE_CASE : str = encoder_hidden_size
@classmethod
def UpperCAmelCase__ ( cls : Tuple , _A : Union[str, os.PathLike] , **_A : Union[str, Any] ):
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case )
__SCREAMING_SNAKE_CASE : Dict = cls.get_config_dict(_snake_case , **_snake_case )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
__SCREAMING_SNAKE_CASE : Tuple = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_snake_case , **_snake_case )
class __UpperCamelCase ( A_ ):
"""simple docstring"""
lowerCAmelCase_ = "blip-2"
lowerCAmelCase_ = True
def __init__( self : List[Any] , _A : str=None , _A : Tuple=None , _A : Optional[Any]=None , _A : List[Any]=32 , **_A : List[Any] ):
"""simple docstring"""
super().__init__(**_snake_case )
if vision_config is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
__SCREAMING_SNAKE_CASE : List[str] = BlipaVisionConfig(**_snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BlipaQFormerConfig(**_snake_case )
__SCREAMING_SNAKE_CASE : Tuple = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
__SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING[text_model_type](**_snake_case )
__SCREAMING_SNAKE_CASE : int = self.text_config.tie_word_embeddings
__SCREAMING_SNAKE_CASE : Any = self.text_config.is_encoder_decoder
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_query_tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.vision_config.hidden_size
__SCREAMING_SNAKE_CASE : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__SCREAMING_SNAKE_CASE : List[str] = 1.0
__SCREAMING_SNAKE_CASE : Any = 0.02
@classmethod
def UpperCAmelCase__ ( cls : Tuple , _A : BlipaVisionConfig , _A : BlipaQFormerConfig , _A : PretrainedConfig , **_A : int , ):
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_snake_case , )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE : int = self.vision_config.to_dict()
__SCREAMING_SNAKE_CASE : Dict = self.qformer_config.to_dict()
__SCREAMING_SNAKE_CASE : List[str] = self.text_config.to_dict()
__SCREAMING_SNAKE_CASE : Optional[int] = self.__class__.model_type
return output
| 303 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None:
"""simple docstring"""
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' ,_snake_case ,)
super().__init__(*_snake_case ,**_snake_case )
| 16 | 0 |
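# Editor's note: the BLIP-2 config above composes sub-configs and serializes
# them recursively in to_dict(). A minimal sketch of that composite-config
# pattern; the class and field names are illustrative, not the transformers
# API.
import copy

class SubConfig:
    def __init__(self, hidden_size=8):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class CompositeConfig:
    model_type = "composite"

    def __init__(self, vision_config=None):
        # fall back to defaults when no sub-config dict is given
        self.vision_config = SubConfig(**(vision_config or {}))

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        # replace the nested config object with its own serialized form
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

cfg = CompositeConfig({"hidden_size": 16})
assert cfg.to_dict()["vision_config"]["hidden_size"] == 16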
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
lowerCAmelCase = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase = {f"""funnel-transformer/{name}""": 5_12 for name in _model_names}
lowerCAmelCase = {f"""funnel-transformer/{name}""": {"""do_lower_case""": True} for name in _model_names}
class A_ ( A_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ = FunnelTokenizer
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = 2
def __init__( self :Optional[int] , lowerCamelCase_ :Dict=None , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :Optional[int]="<unk>" , lowerCamelCase_ :Dict="<sep>" , lowerCamelCase_ :Any="<pad>" , lowerCamelCase_ :str="<cls>" , lowerCamelCase_ :Optional[Any]="<mask>" , lowerCamelCase_ :int="<s>" , lowerCamelCase_ :Dict="</s>" , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Dict=None , lowerCamelCase_ :str="##" , **lowerCamelCase_ :Optional[Any] , ):
"""simple docstring"""
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , clean_text=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , wordpieces_prefix=_snake_case , **_snake_case , )
lowerCamelCase__ : Union[str, Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars
):
lowerCamelCase__ : List[str] =getattr(_snake_case , normalizer_state.pop('type' ) )
lowerCamelCase__ : List[str] =do_lower_case
lowerCamelCase__ : Any =strip_accents
lowerCamelCase__ : Union[str, Any] =tokenize_chinese_chars
lowerCamelCase__ : Union[str, Any] =normalizer_class(**_snake_case )
lowerCamelCase__ : List[str] =do_lower_case
def UpperCAmelCase__ ( self :int , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple=None ):
"""simple docstring"""
lowerCamelCase__ : Any =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =[self.sep_token_id]
lowerCamelCase__ : List[Any] =[self.cls_token_id]
        if token_ids_b is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
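    # Hedged usage sketch for the two methods above (upstream names assumed;
    # Funnel gives the <cls> position its own token type id, cls_token_type_id == 2):
    #
    #     from transformers import FunnelTokenizerFast
    #
    #     tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    #     enc = tok("first segment", "second segment")
    #     print(enc["token_type_ids"])  # starts with 2, then 0s, then 1s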
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ):
"""simple docstring"""
lowerCamelCase__ : str =self._tokenizer.model.save(_snake_case , name=_snake_case )
        return tuple(_snake_case )
| 126 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
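# Hedged note on the _LazyModule pattern above: the heavy framework-specific
# submodules are only imported on first attribute access, e.g. (assumed
# upstream usage, not part of this file):
#
#     import transformers
#
#     tok_cls = transformers.XGLMTokenizer  # triggers the real import here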
| 16 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=A_ ):
A__ : List[str] = ["torch", "torchsde"]
def __init__(self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : List[str] , *snake_case__ : int , **snake_case__ : Union[str, Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : List[Any] , *snake_case__ : List[Any] , **snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
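# Hedged note: DummyObject placeholders like the class above keep top-level
# imports working when an optional backend is missing; instantiating the
# placeholder (or calling its classmethods) raises a descriptive error via
# requires_backends instead of an opaque ImportError at import time.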
| 59 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : List[Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
lowercase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
lowercase__ : int = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
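# Hedged note on the pattern above: each test loads the same checkpoint twice,
# once into TF with from_pt=True and once into PyTorch with from_tf=True, so a
# single checkpoint exercises the cross-framework weight conversion both ways.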
| 16 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
_A = ['bert-base-uncased', 'bert-base-cased']
_A = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class UpperCAmelCase__ ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , A_ ) -> List[str]:
super().__init__()
__UpperCamelCase =tokenizer
__UpperCamelCase =AutoConfig.from_pretrained(_snake_case )
__UpperCamelCase =TFAutoModel.from_config(_snake_case )
def _a ( self , A_ ) -> Optional[Any]:
__UpperCamelCase =self.tokenizer(_snake_case )
__UpperCamelCase =self.bert(**_snake_case )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> List[str]:
super().setUp()
__UpperCamelCase =[
BertTokenizer.from_pretrained(_snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
__UpperCamelCase =[TFBertTokenizer.from_pretrained(_snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(_snake_case , use_fast_bert_tokenizer=_snake_case )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__UpperCamelCase =[
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__UpperCamelCase =list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _a ( self ) -> Dict:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
__UpperCamelCase =tokenizer(_snake_case , return_tensors='tf' , padding='longest' )
__UpperCamelCase =tf_tokenizer(_snake_case )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def _a ( self ) -> List[Any]:
for tf_tokenizer in self.tf_tokenizers:
__UpperCamelCase =tf_tokenizer(self.paired_sentences )
__UpperCamelCase =tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def _a ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
__UpperCamelCase =tf.function(_snake_case )
for test_inputs in (self.test_sentences, self.paired_sentences):
__UpperCamelCase =tf.constant(_snake_case )
__UpperCamelCase =compiled_tokenizer(_snake_case )
__UpperCamelCase =tf_tokenizer(_snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _a ( self ) -> Any:
for tf_tokenizer in self.tf_tokenizers:
__UpperCamelCase =ModelToSave(tokenizer=_snake_case )
__UpperCamelCase =tf.convert_to_tensor(self.test_sentences )
__UpperCamelCase =model(_snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__UpperCamelCase =Path(_snake_case ) / '''saved.model'''
model.save(_snake_case )
__UpperCamelCase =tf.keras.models.load_model(_snake_case )
__UpperCamelCase =loaded_model(_snake_case )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
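# Hedged note: TFBertTokenizer runs as TensorFlow-Text ops inside the graph,
# which is what the tests above rely on when wrapping it in tf.function and
# saving it as part of a tf.keras model.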
| 62 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
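# Commentary on the recurrence above (added, not part of the original file):
# different_colour_ways_number[n][t - 2] counts the tilings of an n-unit row
# that use at least one coloured tile of length t (2, 3 or 4). Placing a tile
# at tile_start extends every counted tiling of the remaining suffix, and the
# "+ 1" accounts for the tiling in which it is the only coloured tile. Summing
# the three per-colour counters gives the Project Euler 116 answer.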
if __name__ == "__main__":
    print(f"{solution() = }")
| 16 | 0 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
lowercase__ : Any = """PoolFormerConfig"""
# Base docstring
lowercase__ : Optional[Any] = """sail/poolformer_s12"""
lowercase__ : int = [1, 5_1_2, 7, 7]
# Image classification docstring
lowercase__ : Dict = """sail/poolformer_s12"""
lowercase__ : str = """tabby, tabby cat"""
lowercase__ : List[str] = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] = 0.0 , lowerCAmelCase__ : Tuple = False ) -> Optional[Any]:
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
lowerCAmelCase_ : int = 1 - drop_prob
lowerCAmelCase_ : List[str] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
lowerCAmelCase_ : Union[str, Any] = keep_prob + torch.rand(__lowerCamelCase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
lowerCAmelCase_ : int = input.div(__lowerCamelCase ) * random_tensor
return output
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Optional[float] = None ):
super().__init__()
lowerCAmelCase_ : Dict = drop_prob
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : torch.Tensor ):
return drop_path(_snake_case , self.drop_prob , self.training )
def SCREAMING_SNAKE_CASE__ ( self : str ):
return "p={}".format(self.drop_prob )
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int]=None ):
super().__init__()
lowerCAmelCase_ : List[str] = patch_size if isinstance(_snake_case , collections.abc.Iterable ) else (patch_size, patch_size)
lowerCAmelCase_ : str = stride if isinstance(_snake_case , collections.abc.Iterable ) else (stride, stride)
lowerCAmelCase_ : Union[str, Any] = padding if isinstance(_snake_case , collections.abc.Iterable ) else (padding, padding)
lowerCAmelCase_ : int = nn.Convad(_snake_case , _snake_case , kernel_size=_snake_case , stride=_snake_case , padding=_snake_case )
lowerCAmelCase_ : int = norm_layer(_snake_case ) if norm_layer else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase_ : List[Any] = self.projection(_snake_case )
lowerCAmelCase_ : Tuple = self.norm(_snake_case )
return embeddings
class UpperCamelCase__ ( nn.GroupNorm ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Tuple ):
super().__init__(1 , _snake_case , **_snake_case )
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
super().__init__()
lowerCAmelCase_ : Union[str, Any] = nn.AvgPoolad(_snake_case , stride=1 , padding=pool_size // 2 , count_include_pad=_snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
return self.pool(_snake_case ) - hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] ):
super().__init__()
lowerCAmelCase_ : Dict = nn.Convad(_snake_case , _snake_case , 1 )
lowerCAmelCase_ : Any = nn.Convad(_snake_case , _snake_case , 1 )
lowerCAmelCase_ : int = PoolFormerDropPath(_snake_case )
if isinstance(config.hidden_act , _snake_case ):
lowerCAmelCase_ : List[Any] = ACTaFN[config.hidden_act]
else:
lowerCAmelCase_ : Dict = config.hidden_act
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase_ : Optional[int] = self.conva(_snake_case )
lowerCAmelCase_ : Any = self.act_fn(_snake_case )
lowerCAmelCase_ : Union[str, Any] = self.drop(_snake_case )
lowerCAmelCase_ : int = self.conva(_snake_case )
lowerCAmelCase_ : str = self.drop(_snake_case )
return hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
super().__init__()
lowerCAmelCase_ : List[Any] = PoolFormerPooling(_snake_case )
lowerCAmelCase_ : int = PoolFormerOutput(_snake_case , _snake_case , _snake_case , _snake_case )
lowerCAmelCase_ : str = PoolFormerGroupNorm(_snake_case )
lowerCAmelCase_ : Optional[Any] = PoolFormerGroupNorm(_snake_case )
# Useful for training neural nets
lowerCAmelCase_ : Optional[Any] = PoolFormerDropPath(_snake_case ) if drop_path > 0.0 else nn.Identity()
lowerCAmelCase_ : str = config.use_layer_scale
if config.use_layer_scale:
lowerCAmelCase_ : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_snake_case) ) , requires_grad=_snake_case )
lowerCAmelCase_ : List[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_snake_case) ) , requires_grad=_snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
if self.use_layer_scale:
lowerCAmelCase_ : List[str] = self.pooling(self.before_norm(_snake_case ) )
lowerCAmelCase_ : Tuple = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
lowerCAmelCase_ : Any = hidden_states + self.drop_path(_snake_case )
lowerCAmelCase_ : int = ()
lowerCAmelCase_ : List[str] = self.output(self.after_norm(_snake_case ) )
lowerCAmelCase_ : Optional[int] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
lowerCAmelCase_ : Optional[int] = hidden_states + self.drop_path(_snake_case )
lowerCAmelCase_ : Optional[int] = (output,) + outputs
return outputs
else:
lowerCAmelCase_ : Any = self.drop_path(self.pooling(self.before_norm(_snake_case ) ) )
# First residual connection
lowerCAmelCase_ : Dict = pooling_output + hidden_states
lowerCAmelCase_ : Tuple = ()
# Second residual connection inside the PoolFormerOutput block
lowerCAmelCase_ : Union[str, Any] = self.drop_path(self.output(self.after_norm(_snake_case ) ) )
lowerCAmelCase_ : Optional[Any] = hidden_states + layer_output
lowerCAmelCase_ : Dict = (output,) + outputs
return outputs
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
super().__init__()
lowerCAmelCase_ : Union[str, Any] = config
# stochastic depth decay rule
lowerCAmelCase_ : List[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
lowerCAmelCase_ : Any = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
lowerCAmelCase_ : Optional[Any] = nn.ModuleList(_snake_case )
# Transformer blocks
lowerCAmelCase_ : str = []
lowerCAmelCase_ : Any = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
lowerCAmelCase_ : int = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_snake_case , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_snake_case ) )
lowerCAmelCase_ : Tuple = nn.ModuleList(_snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True ):
lowerCAmelCase_ : List[str] = () if output_hidden_states else None
lowerCAmelCase_ : Tuple = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
lowerCAmelCase_ : Dict = layers
# Get patch embeddings from hidden_states
lowerCAmelCase_ : str = embedding_layer(_snake_case )
# Send the embeddings through the blocks
for _, blk in enumerate(_snake_case ):
lowerCAmelCase_ : Any = blk(_snake_case )
lowerCAmelCase_ : Dict = layer_outputs[0]
if output_hidden_states:
lowerCAmelCase_ : Tuple = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case , hidden_states=_snake_case )
class UpperCamelCase__ ( A_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PoolFormerConfig
_SCREAMING_SNAKE_CASE = "poolformer"
_SCREAMING_SNAKE_CASE = "pixel_values"
_SCREAMING_SNAKE_CASE = True
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str ):
if isinstance(_snake_case , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_snake_case , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ):
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase_ : Optional[int] = value
lowercase__ : str = R"""\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"""
lowercase__ : Tuple = R"""\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"""
@add_start_docstrings(
"""The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""", A_, )
class UpperCamelCase__ ( A_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ):
super().__init__(_snake_case )
lowerCAmelCase_ : List[Any] = config
lowerCAmelCase_ : Optional[int] = PoolFormerEncoder(_snake_case )
# Initialize weights and apply final processing
self.post_init()
def SCREAMING_SNAKE_CASE__ ( self : str ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , ):
lowerCAmelCase_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
lowerCAmelCase_ : Tuple = self.encoder(
_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , )
lowerCAmelCase_ : Optional[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_snake_case , hidden_states=encoder_outputs.hidden_states , )
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : Any ):
super().__init__()
lowerCAmelCase_ : Dict = nn.Linear(config.hidden_size , config.hidden_size )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCAmelCase_ : List[Any] = self.dense(_snake_case )
return output
@add_start_docstrings(
"""\n PoolFormer Model transformer with an image classification head on top\n """, A_, )
class UpperCamelCase__ ( A_ ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int ):
super().__init__(_snake_case )
lowerCAmelCase_ : Optional[Any] = config.num_labels
lowerCAmelCase_ : Optional[Any] = PoolFormerModel(_snake_case )
# Final norm
lowerCAmelCase_ : Tuple = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
lowerCAmelCase_ : Any = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , ):
lowerCAmelCase_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Dict = self.poolformer(
_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , )
lowerCAmelCase_ : Dict = outputs[0]
lowerCAmelCase_ : Optional[int] = self.classifier(self.norm(_snake_case ).mean([-2, -1] ) )
lowerCAmelCase_ : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase_ : int = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase_ : List[str] = '''single_label_classification'''
else:
lowerCAmelCase_ : Optional[int] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowerCAmelCase_ : List[Any] = MSELoss()
if self.num_labels == 1:
lowerCAmelCase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase_ : Tuple = loss_fct(_snake_case , _snake_case )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase_ : Any = CrossEntropyLoss()
lowerCAmelCase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase_ : str = BCEWithLogitsLoss()
lowerCAmelCase_ : Dict = loss_fct(_snake_case , _snake_case )
if not return_dict:
lowerCAmelCase_ : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_snake_case , logits=_snake_case , hidden_states=outputs.hidden_states )
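# Hedged inference sketch for the classification head above (upstream names
# assumed; the checkpoint id comes from the docstring constants in this file):
#
#     from transformers import AutoImageProcessor, PoolFormerForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#     inputs = processor(images=image, return_tensors="pt")  # image: a PIL.Image
#     predicted = model(**inputs).logits.argmax(-1)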
| 224 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
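# Hedged note: debug_launcher spawns the given function in CPU-only worker
# processes, so these launcher tests run without GPUs, e.g. (assumed usage):
#
#     from accelerate import debug_launcher
#
#     debug_launcher(test_script.main, num_processes=2)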
| 16 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Build a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make a string identifier wide enough to align every element
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        """Return True when `loc` is a valid (row, column) position."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """Return (A + uv^T)^(-1) given that self is A^(-1), or None if singular."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
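    # Commentary (added): the method above applies the Sherman-Morrison identity
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # with `self` playing the role of A^(-1); it returns None exactly when the
    # denominator 1 + v^T A^(-1) u is zero, i.e. the rank-one update is singular.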
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
    test2()
| 190 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
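# Hedged usage sketch for the text-to-speech classes exported above (upstream
# names assumed; the checkpoint id is the public microsoft one):
#
#     from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor
#
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")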
| 16 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
A = None
A = logging.get_logger(__name__)
A = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
A = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class __lowercase ( A_ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["input_ids", "attention_mask"]
__lowerCAmelCase = TaTokenizer
__lowerCAmelCase = []
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase=100 , _UpperCAmelCase=None , **_UpperCAmelCase , ):
if extra_ids > 0 and additional_special_tokens is None:
__a : Tuple = [f"""<extra_id_{i}>""" for i in range(_snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
__a : List[Any] = len(set(filter(lambda _UpperCAmelCase : bool('''extra_id_''' in str(_snake_case ) ) , _snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
_snake_case , tokenizer_file=_snake_case , eos_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , extra_ids=_snake_case , additional_special_tokens=_snake_case , **_snake_case , )
__a : List[Any] = vocab_file
__a : int = False if not self.vocab_file else True
__a : List[str] = extra_ids
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__a : int = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _snake_case , )
return max_model_length
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : Union[str, Any] = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Any = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            __a : Dict = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Tuple = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def _lowerCamelCase ( self ):
return list(
set(filter(lambda _UpperCAmelCase : bool(re.search(R'''<extra_id_\d+>''' , _snake_case ) ) is not None , self.additional_special_tokens ) ) )
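    # Hedged usage sketch for the sentinel helper above and the ids variant
    # below (upstream names assumed; 100 extra ids is the default per the
    # constructor signature earlier in this file):
    #
    #     from transformers import T5TokenizerFast
    #
    #     tok = T5TokenizerFast.from_pretrained("t5-small")
    #     print(len(tok.get_sentinel_tokens()))  # 100 by default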
def _lowerCamelCase ( self ):
        return [self.convert_tokens_to_ids(_snake_case ) for token in self.get_sentinel_tokens()]
| 160 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["torch", "torchsde"]
def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
| 16 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Any = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class __UpperCAmelCase ( A_ , A_ ):
UpperCamelCase = "focalnet"
def __init__( self : Any, __A : Optional[int]=2_2_4, __A : Tuple=4, __A : Optional[Any]=3, __A : Optional[Any]=9_6, __A : List[Any]=False, __A : Optional[int]=[1_9_2, 3_8_4, 7_6_8, 7_6_8], __A : Optional[Any]=[2, 2, 6, 2], __A : Optional[int]=[2, 2, 2, 2], __A : List[str]=[3, 3, 3, 3], __A : List[str]="gelu", __A : Dict=4.0, __A : str=0.0, __A : Any=0.1, __A : Dict=False, __A : List[Any]=1E-4, __A : Dict=False, __A : Optional[int]=False, __A : Optional[Any]=False, __A : Dict=0.0_2, __A : Optional[int]=1E-5, __A : Optional[int]=3_2, __A : Any=None, __A : List[Any]=None, **__A : List[Any], ):
super().__init__(**_snake_case )
UpperCAmelCase : Optional[Any] = image_size
UpperCAmelCase : str = patch_size
UpperCAmelCase : Optional[Any] = num_channels
UpperCAmelCase : Dict = embed_dim
UpperCAmelCase : List[str] = use_conv_embed
UpperCAmelCase : Optional[Any] = hidden_sizes
UpperCAmelCase : Dict = depths
UpperCAmelCase : List[Any] = focal_levels
UpperCAmelCase : Tuple = focal_windows
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Tuple = mlp_ratio
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : str = drop_path_rate
UpperCAmelCase : str = use_layerscale
UpperCAmelCase : List[Any] = layerscale_value
UpperCAmelCase : Optional[Any] = use_post_layernorm
UpperCAmelCase : str = use_post_layernorm_in_modulation
UpperCAmelCase : Dict = normalize_modulator
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Union[str, Any] = layer_norm_eps
UpperCAmelCase : Any = encoder_stride
UpperCAmelCase : int = ['''stem'''] + [F'''stage{idx}''' for idx in range(1, len(self.depths ) + 1 )]
UpperCAmelCase : Tuple = get_aligned_output_features_output_indices(
out_features=_snake_case, out_indices=_snake_case, stage_names=self.stage_names )
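# Hedged sketch of the backbone wiring above (upstream names assumed):
#
#     from transformers import FocalNetConfig
#
#     cfg = FocalNetConfig(out_features=["stage2", "stage4"])
#     print(cfg.out_features, cfg.out_indices)  # aligned by the helper above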
| 336 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCAmelCase_ = 4
lowerCAmelCase_ = 3
class __A ( A_ ):
'''simple docstring'''
pass
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
for shard in shards:
for i in range(__lowerCamelCase ):
yield {"i": i, "shard": shard}
def main ( ) -> Tuple:
lowercase__ : int = int(os.environ['''RANK'''] )
lowercase__ : str = int(os.environ['''WORLD_SIZE'''] )
lowercase__ : List[Any] = ArgumentParser()
parser.add_argument('''--streaming''' , type=__lowerCamelCase )
parser.add_argument('''--local_rank''' , type=__lowerCamelCase )
parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 )
lowercase__ : int = parser.parse_args()
lowercase__ : Optional[Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]}
lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase )
if not streaming:
lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) )
lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase )
lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase )
lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : str = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowercase__ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
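# Hedged note: split_dataset_by_node either assigns whole shards per rank (the
# streaming/iterable path when shards divide evenly) or a contiguous slice of
# examples, giving one extra example to the first full_size % world_size ranks;
# that is exactly what expected_local_size computes above.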
| 16 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Tuple = ["sentencepiece"]
def __init__( self : str , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Dict)-> int:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Dict = ["sentencepiece"]
def __init__( self : str , *UpperCamelCase__ : Any , **UpperCamelCase__ : str)-> Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : List[str] = ["sentencepiece"]
def __init__( self : Any , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[Any])-> Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["sentencepiece"]
def __init__( self : List[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any])-> Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : List[Any] = ["sentencepiece"]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : str)-> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Tuple = ["sentencepiece"]
def __init__( self : int , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Any)-> Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = ["sentencepiece"]
def __init__( self : Optional[int] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Any)-> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Dict = ["sentencepiece"]
def __init__( self : List[str] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any])-> Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Tuple = ["sentencepiece"]
def __init__( self : List[str] , *UpperCamelCase__ : str , **UpperCamelCase__ : int)-> List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Dict = ["sentencepiece"]
def __init__( self : int , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[Any])-> int:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["sentencepiece"]
def __init__( self : Optional[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : int)-> Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : str = ["sentencepiece"]
def __init__( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[Any])-> List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : List[str] = ["sentencepiece"]
def __init__( self : Optional[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict)-> Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["sentencepiece"]
def __init__( self : List[str] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[Any])-> Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Tuple = ["sentencepiece"]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Any)-> Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : int = ["sentencepiece"]
def __init__( self : List[Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Optional[Any])-> Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : str = ["sentencepiece"]
def __init__( self : str , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : int)-> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : str = ["sentencepiece"]
def __init__( self : Dict , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any])-> List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Tuple = ["sentencepiece"]
def __init__( self : Union[str, Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any])-> Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = ["sentencepiece"]
def __init__( self : Dict , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Union[str, Any])-> str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Any = ["sentencepiece"]
def __init__( self : Dict , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int])-> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : List[str] = ["sentencepiece"]
def __init__( self : Union[str, Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any])-> Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : int = ["sentencepiece"]
def __init__( self : Union[str, Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Union[str, Any])-> str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Any = ["sentencepiece"]
def __init__( self : Dict , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[Any])-> Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : str = ["sentencepiece"]
def __init__( self : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[str])-> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Dict = ["sentencepiece"]
def __init__( self : Optional[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[int])-> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : str = ["sentencepiece"]
def __init__( self : Optional[Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str])-> str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Tuple = ["sentencepiece"]
def __init__( self : Any , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[Any])-> List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = ["sentencepiece"]
def __init__( self : List[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : str)-> List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Tuple = ["sentencepiece"]
def __init__( self : str , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any])-> List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class snake_case ( metaclass=A_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["sentencepiece"]
def __init__( self : List[Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Union[str, Any])-> str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
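# Behavior sketch (follows transformers' DummyObject semantics, assumed here):
# instantiating or touching the stub raises an ImportError telling the user to
# install the missing backend, e.g.:
#   snake_case()  # -> ImportError: ... requires the sentencepiece library ...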
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    """Configuration class for TAPAS models; defaults mirror google/tapas-base."""

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
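# Minimal usage sketch (assumes this module is importable as in transformers;
# the model class is not defined in this file):
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#   model = TapasForQuestionAnswering(config)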
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
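# Hypothetical invocation (model and paths are placeholders, not from this file):
#   python run_tabfact.py \
#     --model_name_or_path microsoft/tapex-base \
#     --dataset_name tab_fact --dataset_config_name tab_fact \
#     --do_train --do_eval --output_dir ./tabfact-out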
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : int = num_channels
lowercase__ : Any = embed_dim
lowercase__ : int = depths
lowercase__ : Dict = num_heads
lowercase__ : List[Any] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Optional[int] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Dict = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Tuple = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = is_training
lowercase__ : Optional[int] = scope
lowercase__ : str = use_labels
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Union[str, Any] = encoder_stride
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase : Optional[int] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) ,_snake_case )
# Swinv2 has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowercase__ : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape
lowercase__ : int = (
reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
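# These tests are collected by pytest in the usual transformers layout, e.g.
# (path assumed): RUN_SLOW=1 pytest tests/models/swinv2/test_modeling_swinv2.py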
"""Testing suite for the PyTorch ConvNext model."""
import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
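# How the score below is composed (NLTK's parameterized METEOR): with unigram
# precision P and recall R over aligned words,
#   F_mean  = (P * R) / (alpha * P + (1 - alpha) * R)
#   penalty = gamma * (chunks / matches) ** beta
#   score   = F_mean * (1 - penalty)
# so the alpha/beta/gamma arguments of _compute map directly onto these terms.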
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 16 | 0 |
import torch
def main( ):
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 303 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2_048,
}
class __A ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''' , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        sp_size = len(self.sp_model )
        madeup_words = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ) -> Optional[int]:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ) -> Any:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 ))
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
@property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ) -> Optional[Any]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text : str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> List[Any]:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> Union[str, Any]:
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
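# Minimal round-trip sketch for the tokenizer above (hypothetical usage; the class is
# obfuscated as `__A` but behaves like the XGLM sentencepiece tokenizer):
#   tok = __A("sentencepiece.bpe.model")   # needs a local SentencePiece model file
#   ids = tok("Hello world").input_ids     # </s> (id 2) is prepended, fairseq-style
#   text = tok.decode(ids)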
| 16 | 0 |
"""simple docstring"""
def solution( n : int = 1_0_0 ) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
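# e.g. solution(10) = (1 + ... + 10)**2 - (1**2 + ... + 10**2) = 55**2 - 385 = 2640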
if __name__ == "__main__":
print(f"""{solution() = }""") | 126 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
def convert_weight_and_push( hidden_sizes , name , config , save_directory , push_to_hub = True ) -> Union[str, Any]:
    print(f"""Converting {name}...""" )
    with torch.no_grad():
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                from_model = timm.create_model('''levit_128s''' , pretrained=True )
            else:
                from_model = timm.create_model('''levit_128''' , pretrained=True )
        if hidden_sizes == 1_92:
            from_model = timm.create_model('''levit_192''' , pretrained=True )
        if hidden_sizes == 2_56:
            from_model = timm.create_model('''levit_256''' , pretrained=True )
        if hidden_sizes == 3_84:
            from_model = timm.create_model('''levit_384''' , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 2_24, 2_24) )
        out_original = from_model(x )
        out_ours = our_model(x ).logits
        assert torch.allclose(out_original , out_ours ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f"""Pushed {checkpoint_name}""" )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ) -> List[Any]:
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 16 | 0 |
def UpperCamelCase ( number : int , iterations : int ):
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be an integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCamelCase )
# print(out)
number += 1
out += " "
return out
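# Example: UpperCamelCase(1, 15) ->
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "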
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
'''simple docstring'''
    languages : List[str]
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = None
    _type : str = field(default="Translation" , init=False , repr=False )
def __call__( self : List[str] ) -> Any:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __A :
'''simple docstring'''
    languages : Optional[List] = None
    num_languages : Optional[int] = None
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = None
    _type : str = field(default="TranslationVariableLanguages" , init=False , repr=False )
    def __post_init__( self ) -> Optional[int]:
        """simple docstring"""
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def encode_example( self , translation_dict ) -> int:
        """simple docstring"""
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f"""Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).""" )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
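# Encoding sketch for the variable-language feature above (hypothetical values; both
# classes are obfuscated as `__A`, so `__A` here refers to the second one):
#   feature = __A(languages=["en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "le chat", "la chatte")}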
| 16 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Optional[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> Union[str, Any]:
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> List[str]:
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Union[str, Any]:
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Dict:
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['''hidden_states'''][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['''hidden_states'''][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
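# The tester above feeds the shared ModelTesterMixin machinery: its
# `prepare_config_and_inputs_for_common` returns (config, {"input_ids", "attention_mask"}).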
@require_torch
class UpperCAmelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp( self ) -> str:
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=37 )
    def test_config( self ) -> str:
        self.config_tester.run_common_tests()
    def test_model( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_llama_sequence_classification_model( self ) -> Optional[int]:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_single_label( self ) -> Union[str, Any]:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''single_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_multi_label( self ) -> int:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _a ( self ) -> Union[str, Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling( self , scaling_type ) -> Optional[Any]:
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'''type''': scaling_type, '''factor''': 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _a ( self ) -> List[str]:
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _a ( self ) -> Union[str, Any]:
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _a ( self ) -> Any:
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def _a ( self ) -> Any:
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.float32 )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
# fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def _a ( self ) -> Optional[int]:
        EXPECTED_TEXT_COMPLETION = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
        prompt = '''Simply put, the theory of relativity states that '''
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        input_ids = tokenizer.encode(prompt , return_tensors='pt' )
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids , max_new_tokens=64 , top_p=None , temperature=1 , do_sample=False )
        text = tokenizer.decode(generated_ids[0] , skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION , text )
| 62 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 ) -> Optional[Any]:
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ) -> str:
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""" , eval_metric )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
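    # On a CUDA out-of-memory error, `find_executable_batch_size` halves `batch_size`
    # and re-invokes `inner_training_loop` from scratch until a batch size fits.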
def main( ) -> Dict:
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 16 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args( ) -> Optional[int]:
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def main( ) -> Tuple:
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
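    # `xmp.spawn` forks `nprocs` TPU processes; each one imports the script module and
    # calls its `_mp_fn(index)` entry point with the patched `sys.argv`.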
if __name__ == "__main__":
main()
| 224 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ) -> Any:
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention( idx , cnt ) -> Dict:
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token( idx ) -> Tuple:
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final( ) -> Optional[int]:
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
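# Each helper above emits (huggingface_param_name, original_checkpoint_name) pairs;
# the conversion below copies weights from the right-hand keys into the left-hand ones.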
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ) -> int:
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 10_00
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
lowercase__ : int = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
lowercase__ : int = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
lowercase__ : List[Any] = [2, 2, 20]
lowercase__ : Any = [3, 12, 16]
lowercase__ : Tuple = [1_92, 7_68, 10_24]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu''' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 16 | 0 |
'''simple docstring'''
def greatest_common_divisor( x : int , y : int ) -> int:
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm( x : int , y : int ) -> int:
    return (x * y) // greatest_common_divisor(x , y )
def solution( n : int = 20 ) -> int:
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
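# solution(20) is the LCM of 1..20, i.e. the smallest number evenly divisible
# by every integer from 1 to 20: 232792560 (Project Euler problem 5).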
if __name__ == "__main__":
print(f"""{solution() = }""") | 190 |
"""simple docstring"""
def __UpperCAmelCase ( number : int , iterations : int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            '''starting number must be an integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
    out = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix : NDArray[float64] , constant_matrix : NDArray[float64] , init_val : list[float] , iterations : int , ) -> list[float]:
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg)
    if cols2 != 1:
        msg = F"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            F"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            F"""matrix but received {len(init_val)} and {rows1}"""
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''')
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1)
    rows , cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def __A ( a_ :List[Any]) -> bool:
__a : Tuple = table.shape
__a : Dict = True
for i in range(0 , __lowerCamelCase):
__a : List[str] = 0
for j in range(0 , cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''')
return is_diagonally_dominant
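# Self-contained check on a strictly diagonally dominant 2x2 system
# (4x + y = 2, x + 3y = -6); call _jacobi_demo() to run it:
def _jacobi_demo() -> None:
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[2.0], [-6.0]])
    approx = jacobi_iteration_method(coefficient, constant, [0, 0], 25)
    # Exact solution: x = 12/11 ≈ 1.0909, y = -26/11 ≈ -2.3636
    assert abs(approx[0] - 12 / 11) < 1e-3
    assert abs(approx[1] + 26 / 11) < 1e-3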
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod() | 160 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Optional[Any] = embeddings_size
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : str = depths
lowercase__ : Tuple = is_training
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Optional[Any] = len(_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = TFResNetModel(config=_snake_case )
lowercase__ : List[str] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case )
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ):
lowercase__ : str = model_class(_snake_case )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
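# --- Standalone usage sketch (outside the test classes). Assumes network
# access and that the public "microsoft/resnet-50" checkpoint is available.
def _tf_resnet_demo() -> None:
    model = TFResNetModel.from_pretrained("microsoft/resnet-50")
    pixel_values = tf.random.uniform((1, 3, 224, 224))
    outputs = model(pixel_values)
    # The spatial grid shrinks by a factor of 32: 224 // 32 == 7
    print(outputs.last_hidden_state.shape)  # (1, 2048, 7, 7)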
| 16 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( A_ , unittest.TestCase ):
UpperCamelCase = BigBirdTokenizer
UpperCamelCase = BigBirdTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Dict ):
super().setUp()
UpperCAmelCase : Tuple = self.tokenizer_class(_snake_case, keep_accents=_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = '''<s>'''
UpperCAmelCase : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ), _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ), _snake_case )
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '''<unk>''' )
self.assertEqual(vocab_keys[1], '''<s>''' )
self.assertEqual(vocab_keys[-1], '''[MASK]''' )
self.assertEqual(len(_snake_case ), 1_0_0_4 )
def __magic_name__ ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_0 )
def __magic_name__ ( self : Optional[int] ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase : Optional[int] = self.get_tokenizer()
UpperCAmelCase : int = self.get_rust_tokenizer()
UpperCAmelCase : List[Any] = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase : Dict = tokenizer.tokenize(_snake_case )
UpperCAmelCase : Any = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case, _snake_case )
UpperCAmelCase : Any = tokenizer.encode(_snake_case, add_special_tokens=_snake_case )
UpperCAmelCase : Optional[Any] = rust_tokenizer.encode(_snake_case, add_special_tokens=_snake_case )
self.assertListEqual(_snake_case, _snake_case )
UpperCAmelCase : List[Any] = self.get_rust_tokenizer()
UpperCAmelCase : Dict = tokenizer.encode(_snake_case )
UpperCAmelCase : Tuple = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case, _snake_case )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Optional[int] = BigBirdTokenizer(_snake_case, keep_accents=_snake_case )
UpperCAmelCase : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_snake_case, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ), [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2], )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_snake_case, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
], )
UpperCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case, [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4], )
UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
], )
@cached_property
def __magic_name__ ( self : List[Any] ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Optional[int] = '''Hello World!'''
UpperCAmelCase : Union[str, Any] = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
self.assertListEqual(_snake_case, self.big_tokenizer.encode(_snake_case ) )
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Dict = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase : int = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
# fmt: on
self.assertListEqual(_snake_case, self.big_tokenizer.encode(_snake_case ) )
@require_torch
@slow
def __magic_name__ ( self : int ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase : Dict = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
UpperCAmelCase : Union[str, Any] = ''' '''.join(_snake_case )
UpperCAmelCase : int = self.big_tokenizer.encode_plus(_snake_case, return_tensors='''pt''', return_token_type_ids=_snake_case )
UpperCAmelCase : List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence], return_tensors='''pt''', return_token_type_ids=_snake_case )
UpperCAmelCase : Tuple = BigBirdConfig(attention_type='''original_full''' )
UpperCAmelCase : Any = BigBirdModel(_snake_case )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_snake_case )
model(**_snake_case )
@slow
def __magic_name__ ( self : int ):
UpperCAmelCase : Tuple = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
UpperCAmelCase : int = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def __magic_name__ ( self : int ):
UpperCAmelCase : Union[str, Any] = {'''input_ids''': [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case, model_name='''google/bigbird-roberta-base''', revision='''215c99f1600e06f83acce68422f2035b2b5c3510''', )
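# --- Standalone round-trip sketch mirroring the slow test above (assumes
# the public "google/bigbird-roberta-base" tokenizer is reachable):
def _bigbird_roundtrip_demo() -> None:
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    ids = tokenizer("Paris is the [MASK].").input_ids
    assert tokenizer.decode(ids) == "[CLS] Paris is the[MASK].[SEP]"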
| 336 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace('''model.''' , '''''' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('''norm''' , '''LayerNorm''' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('''.''' )[0].split('''_''' )[-1]
        orig_key = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('''mha.attn''' , '''attention.self''' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('''mha''' , '''attention''' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('''W_q''' , '''self.query''' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('''W_k''' , '''self.key''' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('''W_v''' , '''self.value''' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('''ff1''' , '''intermediate.dense''' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('''ff2''' , '''output.dense''' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('''ff''' , '''output.dense''' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
    if "cls" not in orig_key:
        orig_key = '''yoso.''' + orig_key
    return orig_key
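# Quick illustration of the mapping above on a hypothetical checkpoint key
# (the key string is made up for the example; real YOSO checkpoints follow
# this naming scheme):
assert rename_key('''model.transformer_0.mha.W_q.weight''') == (
    '''yoso.encoder.layer.0.attention.self.query.weight'''
)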
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict['''cls.predictions.bias'''] = orig_state_dict['''cls.predictions.decoder.bias''']
    orig_state_dict['''yoso.embeddings.position_ids'''] = torch.arange(max_position_embeddings).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model_state_dict''']
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict)
    print(model.load_state_dict(new_state_dict) )
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 16 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class snake_case ( A_, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Any = "ssube/stable-diffusion-x4-upscaler-onnx"
def lowercase_ ( self : Any , UpperCamelCase__ : Any=0)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: List[Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(_snake_case))
__lowerCAmelCase: Union[str, Any] = torch.manual_seed(_snake_case)
__lowerCAmelCase: List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Optional[Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=_snake_case)
__lowerCAmelCase: List[Any] = self.get_dummy_inputs()
__lowerCAmelCase: Union[str, Any] = pipe(**_snake_case).images
__lowerCAmelCase: Optional[int] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase: List[str] = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def lowercase_ ( self : List[str])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase: List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
__lowerCAmelCase: List[Any] = self.get_dummy_inputs()
__lowerCAmelCase: Optional[Any] = pipe(**_snake_case).images
__lowerCAmelCase: int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase: Any = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowercase_ ( self : Tuple)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase: int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_snake_case)
__lowerCAmelCase: Union[str, Any] = self.get_dummy_inputs()
__lowerCAmelCase: str = pipe(**_snake_case).images
__lowerCAmelCase: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase: Tuple = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowercase_ ( self : Optional[int])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase: Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_snake_case)
__lowerCAmelCase: Optional[Any] = self.get_dummy_inputs()
__lowerCAmelCase: Any = pipe(**_snake_case).images
__lowerCAmelCase: Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase: List[str] = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowercase_ ( self : Dict)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase: int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_snake_case)
__lowerCAmelCase: List[str] = self.get_dummy_inputs()
__lowerCAmelCase: int = pipe(**_snake_case).images
__lowerCAmelCase: Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase: Union[str, Any] = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case ( unittest.TestCase ):
@property
def lowercase_ ( self : int)-> List[str]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self : Union[str, Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: List[str] = ort.SessionOptions()
__lowerCAmelCase: Optional[Any] = False
return options
def lowercase_ ( self : Optional[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
__lowerCAmelCase: int = init_image.resize((1_2_8, 1_2_8))
# using the PNDM scheduler by default
__lowerCAmelCase: Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_snake_case)
__lowerCAmelCase: Any = '''A fantasy landscape, trending on artstation'''
__lowerCAmelCase: Union[str, Any] = torch.manual_seed(0)
__lowerCAmelCase: Tuple = pipe(
prompt=_snake_case , image=_snake_case , guidance_scale=7.5 , num_inference_steps=1_0 , generator=_snake_case , output_type="np" , )
__lowerCAmelCase: Tuple = output.images
__lowerCAmelCase: List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase: Any = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def lowercase_ ( self : List[str])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
__lowerCAmelCase: List[str] = init_image.resize((1_2_8, 1_2_8))
__lowerCAmelCase: Any = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler")
__lowerCAmelCase: List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_snake_case)
__lowerCAmelCase: List[Any] = '''A fantasy landscape, trending on artstation'''
__lowerCAmelCase: Optional[int] = torch.manual_seed(0)
__lowerCAmelCase: Union[str, Any] = pipe(
prompt=_snake_case , image=_snake_case , guidance_scale=7.5 , num_inference_steps=2_0 , generator=_snake_case , output_type="np" , )
__lowerCAmelCase: List[str] = output.images
__lowerCAmelCase: str = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase: str = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
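# --- Minimal inference sketch outside the tests (assumes the same
# "ssube/stable-diffusion-x4-upscaler-onnx" checkpoint used above and a CPU
# execution provider); call _upscale_demo() to run it:
def _upscale_demo() -> None:
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
        "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
    )
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images"
        "/resolve/main/img2img/sketch-mountains-input.jpg"
    ).resize((128, 128))
    # The x4 upscaler turns a 128x128 input into a 512x512 output.
    image = pipe(prompt="a photo", image=low_res, num_inference_steps=10).images[0]
    image.save("upscaled.png")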
| 217 |
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('''"''' , '''''' ).split(''',''' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
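def name_score(position: int, name: str) -> int:
    # Helper equivalent to one step of the loop above, shown for clarity.
    return position * sum(ord(letter) - 64 for letter in name.upper())
# From the Project Euler 22 statement: COLIN (3 + 15 + 12 + 9 + 14 = 53)
# as the 938th name contributes 938 * 53 = 49714 to the total.
assert name_score(938, "COLIN") == 49714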
if __name__ == "__main__":
print(solution())
| 16 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __UpperCamelCase :
def __init__( self , __a , __a=3 , __a=32 , __a=3 , __a=10 , __a=[8, 16, 32, 64] , __a=[1, 1, 2, 1] , __a=True , __a=True , __a="relu" , __a=3 , __a=None , __a=["stage2", "stage3", "stage4"] , __a=[2, 3, 4] , __a=1 , ):
'''simple docstring'''
__a : Any = parent
__a : Dict = batch_size
__a : Any = image_size
__a : Dict = num_channels
__a : int = embeddings_size
__a : str = hidden_sizes
__a : Tuple = depths
__a : Tuple = is_training
__a : str = use_labels
__a : int = hidden_act
__a : List[Any] = num_labels
__a : Dict = scope
__a : Dict = len(_snake_case )
__a : Optional[int] = out_features
__a : List[str] = out_indices
__a : Any = num_groups
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Optional[int] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size] , self.num_labels )
__a : Optional[int] = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Tuple = BitModel(config=_snake_case )
model.to(_snake_case )
model.eval()
__a : Union[str, Any] = model(_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = self.num_labels
__a : List[str] = BitForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
__a : int = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : int = BitBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
__a : str = model(_snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__a : Tuple = None
__a : Optional[int] = BitBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
__a : int = model(_snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.prepare_config_and_inputs()
__a : Optional[Any] = config_and_inputs
__a : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A_ , A_ , unittest.TestCase ):
A_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
A_ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = BitModelTester(self )
__a : Tuple = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='Bit does not output attentions' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = model_class(_snake_case )
__a : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : List[str] = [*signature.parameters.keys()]
__a : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_snake_case )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = model_class(config=_snake_case )
for name, module in model.named_modules():
if isinstance(_snake_case , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : Optional[int] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__a : Tuple = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__a : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : int = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__a : str = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__a : Dict = layer_type
__a : Tuple = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : List[Any] = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Dict = BitModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def lowerCamelCase ():
__a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_snake_case )
__a : List[Any] = self.default_image_processor
__a : str = prepare_img()
__a : str = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
__a : Optional[int] = model(**_snake_case )
# verify the logits
__a : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _snake_case )
__a : List[str] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@require_torch
class __UpperCamelCase ( A_ , unittest.TestCase ):
A_ = (BitBackbone,) if is_torch_available() else ()
A_ = BitConfig
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = BitModelTester(self )
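# --- Standalone backbone sketch mirroring create_and_check_backbone above.
# The "google/bit-50" checkpoint name is an assumption about an available
# hub model; call _bit_backbone_demo() to run it.
def _bit_backbone_demo() -> None:
    backbone = BitBackbone.from_pretrained("google/bit-50", out_features=["stage2", "stage3"])
    pixel_values = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        outputs = backbone(pixel_values)
    for feature_map in outputs.feature_maps:
        print(tuple(feature_map.shape))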
| 27 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class __A ( A_ ):
'''simple docstring'''
def __init__( self : List[str] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
requires_backends(self ,'''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] ,_snake_case : Union[str, List[str], "Image", List["Image"]] ,**_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,**_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = {}
if "candidate_labels" in kwargs:
lowercase__ : Any = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowercase__ : Optional[Any] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,_snake_case : Union[str, Any]="This is a photo of {}." ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = load_image(_snake_case )
lowercase__ : int = self.image_processor(images=[image] ,return_tensors=self.framework )
lowercase__ : str = candidate_labels
lowercase__ : Dict = [hypothesis_template.format(x ) for x in candidate_labels]
lowercase__ : Any = self.tokenizer(_snake_case ,return_tensors=self.framework ,padding=_snake_case )
lowercase__ : Optional[int] = [text_inputs]
return inputs
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = model_inputs.pop('''candidate_labels''' )
lowercase__ : Union[str, Any] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] ,_snake_case ):
lowercase__ : List[str] = text_inputs[0]
else:
# Batching case.
lowercase__ : int = text_inputs[0][0]
lowercase__ : Tuple = self.model(**_snake_case ,**_snake_case )
lowercase__ : Union[str, Any] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
    def UpperCAmelCase ( self : Any ,model_outputs : Tuple ) -> Any:
        """simple docstring"""
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores ,list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits ,axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores ,candidate_labels ) ,key=lambda x : -x[0] )
        ]
return result
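# --- Minimal usage sketch for the pipeline above (the CLIP checkpoint name
# is an assumption; any zero-shot image-classification model works):
def _zero_shot_demo():
    from transformers import pipeline
    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    return classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "dog"],
    )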
| 16 | 0 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
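def response_ratio(waiting: float, burst: float) -> float:
    # Helper shown for clarity (not used above): HRRN always dispatches the
    # ready process with the highest (waiting + burst) / burst ratio.
    return (waiting + burst) / burst
# Hypothetical numbers: a process that has waited 6 units with a 2-unit burst
# (ratio 4.0) beats a freshly arrived one, whose ratio is always 1.0.
assert response_ratio(6, 2) == 4.0 > response_ratio(0, 2)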
if __name__ == "__main__":
    no_of_process = 5
    process_name = ['''A''', '''B''', '''C''', '''D''', '''E''']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
    for i in range(0, no_of_process):
        print(
            f"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
            f"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
        )
    print(f"""average waiting time : {mean(waiting_time):.5f}""")
    print(f"""average turn around time : {mean(turn_around_time):.5f}""")
| 70 |
"""simple docstring"""
def _print_dist(dist, v):
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('''inf''' ):
                print(int(dist[i][j] ) , end='''\t''' )
            else:
                print('''INF''' , end='''\t''' )
        print()
def floyd_warshall(graph, v):
    dist = [[float('''inf''' ) for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('''inf''' )
                    and dist[k][j] != float('''inf''' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v)
    return dist, v
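# Self-contained check (no interactive input) reproducing the example
# session documented at the bottom of this file; call _floyd_warshall_demo()
# to run it:
def _floyd_warshall_demo():
    inf = float('''inf''')
    g = [[inf] * 3 for _ in range(3)]
    for i in range(3):
        g[i][i] = 0.0
    g[1][2] = 2.0  # Edge 1 from the sample session below
    g[2][1] = 1.0  # Edge 2 from the sample session below
    dist, _ = floyd_warshall(g, 3)
    assert dist[1][2] == 2.0 and dist[2][1] == 1.0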
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 16 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowercase_ = logging.getLogger(__name__)
def a__ ( snake_case , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = False , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = bnb_quantization_config.load_in_abit
__SCREAMING_SNAKE_CASE : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
__SCREAMING_SNAKE_CASE : List[str] = []
# custom device map
if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(device_map.keys() ) > 1:
__SCREAMING_SNAKE_CASE : str = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
__SCREAMING_SNAKE_CASE : Any = get_keys_to_not_convert(__lowerCamelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowerCamelCase )
# compatibility with peft
__SCREAMING_SNAKE_CASE : Optional[Any] = load_in_abit
__SCREAMING_SNAKE_CASE : Optional[Any] = load_in_abit
__SCREAMING_SNAKE_CASE : str = get_parameter_device(__lowerCamelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
__SCREAMING_SNAKE_CASE : Dict = replace_with_bnb_layers(__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase )
# convert param to the right dtype
__SCREAMING_SNAKE_CASE : Dict = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
__SCREAMING_SNAKE_CASE : Any = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
__SCREAMING_SNAKE_CASE : Dict = getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowerCamelCase ):
param.to(__lowerCamelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
__SCREAMING_SNAKE_CASE : int = replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = get_quantized_model_device_map(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_memory=__lowerCamelCase , no_split_module_classes=__lowerCamelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : int = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCamelCase , offload_state_dict=__lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__lowerCamelCase , device_map=__lowerCamelCase , offload_dir=__lowerCamelCase )
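# --- Usage sketch for the quantization entry point above (it corresponds to
# accelerate's public `load_and_quantize_model`). The model class and the
# checkpoint path below are placeholders, so the flow is shown commented out:
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel(config)  # placeholder model class
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/checkpoint"  # placeholder
#   )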
def a__ ( snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
__SCREAMING_SNAKE_CASE : Optional[int] = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
__SCREAMING_SNAKE_CASE : List[str] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
__SCREAMING_SNAKE_CASE : List[str] = {}
__SCREAMING_SNAKE_CASE : Tuple = special_dtypes
__SCREAMING_SNAKE_CASE : Optional[int] = no_split_module_classes
__SCREAMING_SNAKE_CASE : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
__SCREAMING_SNAKE_CASE : Optional[Any] = get_balanced_memory(
__lowerCamelCase , low_zero=(device_map == '''balanced_low_0''') , max_memory=__lowerCamelCase , **__lowerCamelCase , )
__SCREAMING_SNAKE_CASE : int = max_memory
__SCREAMING_SNAKE_CASE : List[Any] = infer_auto_device_map(__lowerCamelCase , **__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
# check if don't have any quantized module on the cpu
__SCREAMING_SNAKE_CASE : Any = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
            if bnb_quantization_config.load_in_4bit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                    '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def a__ ( snake_case , snake_case , snake_case=None , snake_case=None ):
"""simple docstring"""
if modules_to_not_convert is None:
__SCREAMING_SNAKE_CASE : Optional[int] = []
__SCREAMING_SNAKE_CASE : Optional[Any] = _replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not has_been_replaced:
logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' This can happen for some architectures, such as gpt2, that use Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on GitHub if you think this is'''
            ''' a bug.''' )
return model
def a__ ( snake_case , snake_case , snake_case=None , snake_case=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = False
for name, module in model.named_children():
if current_key_name is None:
__SCREAMING_SNAKE_CASE : Optional[int] = []
current_key_name.append(__lowerCamelCase )
if isinstance(__lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''.'''.join(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : Dict = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
__SCREAMING_SNAKE_CASE : int = False
break
if proceed:
                # Load the bnb module with empty weights and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t both be False''' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
if len(list(module.children() ) ) > 0:
__SCREAMING_SNAKE_CASE : int = _replace_with_bnb_layers(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def a__ ( snake_case ):
"""simple docstring"""
# Create a copy of the model
with init_empty_weights():
        __SCREAMING_SNAKE_CASE : int = deepcopy(__lowerCamelCase )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
__SCREAMING_SNAKE_CASE : Optional[Any] = find_tied_parameters(__lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : List[str] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = sum(__lowerCamelCase , [] )
__SCREAMING_SNAKE_CASE : List[str] = len(__lowerCamelCase ) > 0
# Check if it is a base model
__SCREAMING_SNAKE_CASE : Any = False
if hasattr(__lowerCamelCase , '''base_model_prefix''' ):
__SCREAMING_SNAKE_CASE : Optional[int] = not hasattr(__lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__SCREAMING_SNAKE_CASE : int = list(model.named_children() )
__SCREAMING_SNAKE_CASE : List[Any] = [list_modules[-1][0]]
# add last module together with tied weights
__SCREAMING_SNAKE_CASE : Union[str, Any] = set(__lowerCamelCase ) - set(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = list(set(__lowerCamelCase ) ) + list(__lowerCamelCase )
# remove ".weight" from the keys
__SCREAMING_SNAKE_CASE : int = ['''.weight''', '''.bias''']
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__SCREAMING_SNAKE_CASE : str = name.replace(__lowerCamelCase , '''''' )
filtered_module_names.append(__lowerCamelCase )
return filtered_module_names
def a__ ( snake_case ):
"""simple docstring"""
for m in model.modules():
if isinstance(__lowerCamelCase , bnb.nn.Linearabit ):
return True
return False
def a__ ( snake_case ):
"""simple docstring"""
return next(parameter.parameters() ).device
def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , 0 , dtype=__lowerCamelCase , value=__lowerCamelCase )
__SCREAMING_SNAKE_CASE : Any = param_name
__SCREAMING_SNAKE_CASE : List[Any] = model
if "." in tensor_name:
__SCREAMING_SNAKE_CASE : Dict = tensor_name.split('''.''' )
for split in splits[:-1]:
__SCREAMING_SNAKE_CASE : int = getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
__SCREAMING_SNAKE_CASE : int = new_module
__SCREAMING_SNAKE_CASE : str = splits[-1]
# offload weights
__SCREAMING_SNAKE_CASE : Any = False
offload_weight(module._parameters[tensor_name] , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase , )
else:
offload_weight(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase )
offload_weight(__lowerCamelCase , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase )
set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , '''meta''' , dtype=__lowerCamelCase , value=torch.empty(*param.size() ) )
| 303 |
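The row above is the core bitsandbytes replacement pattern: walk model.named_children() recursively and, for every nn.Linear whose dotted path is not excluded, build a replacement layer and swap it in with setattr. Below is a minimal, self-contained sketch of that traversal; FakeQuantLinear is a hypothetical stand-in for bnb.nn.Linear8bitLt so the example runs without bitsandbytes installed.
import torch.nn as nn

class FakeQuantLinear(nn.Linear):
    """Hypothetical stand-in for a quantized linear layer such as bnb.nn.Linear8bitLt."""

def replace_linear(model: nn.Module, skip: set, prefix: str = "") -> int:
    replaced = 0
    for name, child in model.named_children():
        path = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and path not in skip:
            new_layer = FakeQuantLinear(
                child.in_features, child.out_features, bias=child.bias is not None
            )
            new_layer.weight.data = child.weight.data  # reuse the original weights
            if child.bias is not None:
                new_layer.bias.data = child.bias.data
            setattr(model, name, new_layer)  # swap the module in place
            replaced += 1
        else:
            replaced += replace_linear(child, skip, path)  # recurse into submodules
    return replaced

net = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
print(replace_linear(net, skip={"2"}))  # replaces layer "0", skips layer "2" -> prints 1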
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Dict ,*_snake_case : Any ,**_snake_case : str ) -> None:
"""simple docstring"""
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , FutureWarning , )
super().__init__(*_snake_case ,**_snake_case )
| 16 | 0 |
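The row above is the standard deprecation shim: the old class subclasses the new implementation and emits a FutureWarning on construction. A generic sketch of the pattern, with hypothetical class names:
import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept only for backwards compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor(size=128)  # still works, but emits a FutureWarning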
"""simple docstring"""
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowerCAmelCase_ ( snake_case_ : int ) ->Dict:
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def lowerCAmelCase_ ( snake_case_ : str ) ->List[Any]:
lowerCamelCase__ : Dict =create_tensor(__lowerCamelCase )
lowerCamelCase__ : int =gather(__lowerCamelCase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def lowerCAmelCase_ ( snake_case_ : List[str] ) ->List[str]:
lowerCamelCase__ : List[str] =[state.process_index]
lowerCamelCase__ : Dict =gather_object(__lowerCamelCase )
assert len(__lowerCamelCase ) == state.num_processes, f"""{gathered_obj}, {len(__lowerCamelCase )} != {state.num_processes}"""
assert gathered_obj == list(range(state.num_processes ) ), f"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def lowerCAmelCase_ ( snake_case_ : str ) ->List[Any]:
lowerCamelCase__ : str =create_tensor(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] =broadcast(__lowerCamelCase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def lowerCAmelCase_ ( snake_case_ : Dict ) ->List[str]:
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
lowerCamelCase__ : Any =torch.arange(state.num_processes + 1 ).to(state.device )
else:
lowerCamelCase__ : Tuple =torch.arange(state.num_processes ).to(state.device )
lowerCamelCase__ : List[str] =pad_across_processes(__lowerCamelCase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def lowerCAmelCase_ ( snake_case_ : int ) ->Optional[int]:
# For now runs on only two processes
if state.num_processes != 2:
return
lowerCamelCase__ : Tuple =create_tensor(__lowerCamelCase )
lowerCamelCase__ : Dict =reduce(__lowerCamelCase , 'sum' )
lowerCamelCase__ : Optional[int] =torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), f"""{reduced_tensor} != {truth_tensor}"""
def lowerCAmelCase_ ( snake_case_ : List[Any] ) ->int:
# For now runs on only two processes
if state.num_processes != 2:
return
lowerCamelCase__ : List[str] =create_tensor(__lowerCamelCase )
lowerCamelCase__ : int =reduce(__lowerCamelCase , 'mean' )
lowerCamelCase__ : List[str] =torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), f"""{reduced_tensor} != {truth_tensor}"""
def lowerCAmelCase_ ( snake_case_ : Dict ) ->Optional[int]:
# For xla_spawn (TPUs)
main()
def lowerCAmelCase_ ( ) ->List[str]:
lowerCamelCase__ : List[str] =PartialState()
state.print(f"""State: {state}""" )
state.print('testing gather' )
test_gather(__lowerCamelCase )
state.print('testing gather_object' )
test_gather_object(__lowerCamelCase )
state.print('testing broadcast' )
test_broadcast(__lowerCamelCase )
state.print('testing pad_across_processes' )
test_pad_across_processes(__lowerCamelCase )
state.print('testing reduce_sum' )
test_reduce_sum(__lowerCamelCase )
state.print('testing reduce_mean' )
test_reduce_mean(__lowerCamelCase )
if __name__ == "__main__":
main() | 126 |
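The exact values asserted above follow from create_tensor: rank r of n processes holds [r*n + 1, ..., r*n + n]. A single-process sketch that reproduces the expected collective results for n = 2, assuming gather concatenates across ranks and reduce folds elementwise:
import torch

n = 2  # simulated world size
per_rank = [torch.arange(n) + 1.0 + n * rank for rank in range(n)]
# rank 0 holds [1., 2.], rank 1 holds [3., 4.]

gathered = torch.cat(per_rank)  # what gather() returns on every rank
assert gathered.tolist() == list(range(1, n * n + 1))

reduced_sum = torch.stack(per_rank).sum(dim=0)  # reduce(..., "sum")
assert torch.allclose(reduced_sum, torch.tensor([4.0, 6.0]))

reduced_mean = torch.stack(per_rank).mean(dim=0)  # reduce(..., "mean")
assert torch.allclose(reduced_mean, torch.tensor([2.0, 3.0]))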
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 16 | 0 |
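_LazyModule above defers the heavy framework imports until an attribute is first touched. The same effect can be sketched with a module-level __getattr__ (PEP 562); the mapping below is illustrative and assumes the code lives in a package's __init__.py:
# __init__.py -- minimal lazy loading via PEP 562
import importlib

_LAZY = {"XGLMConfig": ".configuration_xglm"}  # public attribute -> submodule

def __getattr__(name: str):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)  # submodule is imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")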
class UpperCAmelCase :
def __init__(self : Tuple , snake_case__ : int , snake_case__ : Union[str, Any]=None , snake_case__ : List[Any]=None ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = data
snake_case : Tuple = previous
snake_case : Tuple = next_node
def __str__(self : Optional[Any] ) -> str:
'''simple docstring'''
return f"""{self.data}"""
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
return self.data
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.next
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return self.previous
class UpperCAmelCase :
def __init__(self : Optional[Any] , snake_case__ : Any ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = head
def __iter__(self : Union[str, Any] ) -> Any:
'''simple docstring'''
return self
def _SCREAMING_SNAKE_CASE (self : str ) -> Any:
'''simple docstring'''
if not self.current:
raise StopIteration
else:
snake_case : str = self.current.get_data()
snake_case : Optional[Any] = self.current.get_next()
return value
class UpperCAmelCase :
def __init__(self : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Tuple = None # First node in list
snake_case : int = None # Last node in list
def __str__(self : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = self.head
snake_case : Dict = []
while current is not None:
nodes.append(current.get_data() )
snake_case : Dict = current.get_next()
return " ".join(str(_snake_case ) for node in nodes )
def __contains__(self : Any , snake_case__ : int ) -> Any:
'''simple docstring'''
snake_case : List[str] = self.head
while current:
if current.get_data() == value:
return True
snake_case : Tuple = current.get_next()
return False
def __iter__(self : Any ) -> Union[str, Any]:
'''simple docstring'''
return LinkedListIterator(self.head )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any:
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Node ) -> None:
'''simple docstring'''
if self.head is None:
snake_case : Dict = node
snake_case : Dict = node
else:
self.insert_before_node(self.head , _snake_case )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Node ) -> None:
'''simple docstring'''
if self.head is None:
self.set_head(_snake_case )
else:
self.insert_after_node(self.tail , _snake_case )
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : int ) -> None:
'''simple docstring'''
snake_case : Any = Node(_snake_case )
if self.head is None:
self.set_head(_snake_case )
else:
self.set_tail(_snake_case )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Node , snake_case__ : Node ) -> None:
'''simple docstring'''
snake_case : Tuple = node
snake_case : Optional[Any] = node.previous
if node.get_previous() is None:
snake_case : Optional[int] = node_to_insert
else:
snake_case : List[str] = node_to_insert
snake_case : Tuple = node_to_insert
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Node , snake_case__ : Node ) -> None:
'''simple docstring'''
snake_case : Dict = node
snake_case : str = node.next
if node.get_next() is None:
snake_case : int = node_to_insert
else:
snake_case : Optional[int] = node_to_insert
snake_case : Tuple = node_to_insert
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : int , snake_case__ : int ) -> None:
'''simple docstring'''
snake_case : Dict = 1
snake_case : Optional[Any] = Node(_snake_case )
snake_case : Any = self.head
while node:
if current_position == position:
self.insert_before_node(_snake_case , _snake_case )
return
current_position += 1
snake_case : Optional[Any] = node.next
self.insert_after_node(self.tail , _snake_case )
def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : int ) -> Node:
'''simple docstring'''
snake_case : Any = self.head
while node:
if node.get_data() == item:
return node
snake_case : Tuple = node.get_next()
raise Exception("Node not found" )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : List[Any] ) -> str:
'''simple docstring'''
if (node := self.get_node(_snake_case )) is not None:
if node == self.head:
snake_case : List[Any] = self.head.get_next()
if node == self.tail:
snake_case : List[Any] = self.tail.get_previous()
self.remove_node_pointers(_snake_case )
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : Node ) -> None:
'''simple docstring'''
if node.get_next():
snake_case : int = node.previous
if node.get_previous():
snake_case : Optional[int] = node.next
snake_case : List[Any] = None
snake_case : Tuple = None
def _SCREAMING_SNAKE_CASE (self : str ) -> str:
'''simple docstring'''
return self.head is None
def UpperCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 |
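The method names in the list above were mangled in this dump, so here is an equivalent, readable mini doubly linked list exercising the same head/tail insertion and iteration logic:
class DNode:
    def __init__(self, data):
        self.data, self.prev, self.next = data, None, None

class MiniDoublyLinkedList:
    def __init__(self):
        self.head = self.tail = None

    def append(self, data):  # insert at the tail
        node = DNode(data)
        if self.tail is None:
            self.head = self.tail = node
        else:
            node.prev, self.tail.next = self.tail, node
            self.tail = node

    def prepend(self, data):  # insert at the head
        node = DNode(data)
        if self.head is None:
            self.head = self.tail = node
        else:
            node.next, self.head.prev = self.head, node
            self.head = node

    def __iter__(self):
        current = self.head
        while current:
            yield current.data
            current = current.next

dll = MiniDoublyLinkedList()
dll.append(2)
dll.append(3)
dll.prepend(1)
assert list(dll) == [1, 2, 3]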
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Dict = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Dict = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : str = TFAutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Any = AutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ , lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ , lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : List[Any] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : str = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
lowercase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
lowercase__ : int = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
| 16 | 0 |
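The round trips above rely on the real from_pt/from_tf flags of the Hugging Face Auto classes. Outside a test harness, the same cross-framework load looks like the sketch below (it downloads weights and needs both torch and tensorflow installed):
from transformers import AutoModel, TFAutoModel

# Load PyTorch weights into a TensorFlow model, then convert back again.
tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
tf_model.save_pretrained("./bert-tf")
pt_model = AutoModel.from_pretrained("./bert-tf", from_tf=True)
print(type(tf_model).__name__, type(pt_model).__name__)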
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal to the ellipse 4x^2 + y^2 = 100 at (x, y)
    normal_gradient = point_y / 4 / point_x
    # s2 = sin(2 * theta_n), c2 = cos(2 * theta_n), where tan(theta_n) = normal_gradient;
    # these reflect the incoming gradient about the normal
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point; take the other
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62 |
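For reference, the geometry next_point implements, written out in LaTeX: on the ellipse 4x^2 + y^2 = 100 implicit differentiation gives the tangent, n = y/(4x) is the normal gradient, and the outgoing gradient is the incoming gradient m reflected about the normal via the double-angle identities. Substituting the reflected line through the current point (a, b) back into the ellipse gives the quadratic the code solves:
\frac{dy}{dx} = -\frac{4x}{y}, \qquad n = \frac{y}{4x}, \qquad
s = \frac{2n}{1+n^{2}}, \quad c = \frac{1-n^{2}}{1+n^{2}}, \qquad
m_{\mathrm{out}} = \frac{s - c\,m}{c + s\,m}

\left(m_{\mathrm{out}}^{2} + 4\right)x^{2}
+ 2\,m_{\mathrm{out}}\left(b - m_{\mathrm{out}}\,a\right)x
+ \left(b - m_{\mathrm{out}}\,a\right)^{2} - 100 = 0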
"""simple docstring"""
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 | 0 |
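Per the worked example in the Project Euler 116 statement, a row of length 5 admits 7 tilings with length-2 tiles, 3 with length-3 tiles and 2 with length-4 tiles, so the recurrence above can be sanity-checked with:
# depends on solution() from the snippet above
assert solution(5) == 7 + 3 + 2 == 12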
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : Any = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowercase__ : Optional[Any] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def UpperCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCAmelCase_ : Tuple = '''lm_head'''
lowerCAmelCase_ : Tuple = getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
lowerCAmelCase_ : List[Any] = getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
lowerCAmelCase_ : Any = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
lowerCAmelCase_ : Optional[int] = value
elif weight_type == "weight_g":
lowerCAmelCase_ : Tuple = value
elif weight_type == "weight_v":
lowerCAmelCase_ : Any = value
elif weight_type == "bias":
lowerCAmelCase_ : int = value
else:
lowerCAmelCase_ : Tuple = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : int = fairseq_model.state_dict()
lowerCAmelCase_ : str = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase_ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == 'group' , )
lowerCAmelCase_ : int = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase_ : Union[str, Any] = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCAmelCase_ : List[str] = True
if "*" in mapped_key:
lowerCAmelCase_ : Tuple = name.split(__lowerCamelCase )[0].split('.' )[-2]
lowerCAmelCase_ : Union[str, Any] = mapped_key.replace('*' , __lowerCamelCase )
if "weight_g" in name:
lowerCAmelCase_ : int = '''weight_g'''
elif "weight_v" in name:
lowerCAmelCase_ : Tuple = '''weight_v'''
elif "bias" in name:
lowerCAmelCase_ : Tuple = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase_ : List[str] = '''weight'''
else:
lowerCAmelCase_ : Dict = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"Unused weights: {unused_weights}" )
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ : Dict = full_name.split('conv_layers.' )[-1]
lowerCAmelCase_ : int = name.split('.' )
lowerCAmelCase_ : str = int(items[0] )
lowerCAmelCase_ : int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
lowerCAmelCase_ : Any = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
lowerCAmelCase_ : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
lowerCAmelCase_ : Any = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
lowerCAmelCase_ : List[str] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def UpperCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Union[str, Any]=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
lowerCAmelCase_ : Union[str, Any] = UniSpeechConfig.from_pretrained(__lowerCamelCase )
else:
lowerCAmelCase_ : Optional[int] = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCAmelCase_ : Union[str, Any] = Dictionary.load_from_json(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase_ : Optional[int] = target_dict.pad_index
lowerCAmelCase_ : Optional[Any] = target_dict.bos_index
lowerCAmelCase_ : Optional[int] = target_dict.eos_index
lowerCAmelCase_ : Tuple = len(target_dict.symbols )
lowerCAmelCase_ : Optional[int] = os.path.join(__lowerCamelCase , 'vocab.json' )
if not os.path.isdir(__lowerCamelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
lowerCAmelCase_ : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase_ : Any = 42
lowerCAmelCase_ : Union[str, Any] = 43
with open(__lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase_ : Tuple = WavaVecaPhonemeCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__lowerCamelCase , )
lowerCAmelCase_ : str = True if config.feat_extract_norm == '''layer''' else False
lowerCAmelCase_ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
lowerCAmelCase_ : Tuple = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
lowerCAmelCase_ : List[str] = UniSpeechForCTC(__lowerCamelCase )
else:
lowerCAmelCase_ : List[Any] = UniSpeechForPreTraining(__lowerCamelCase )
if is_finetuned:
lowerCAmelCase_ : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
lowerCAmelCase_ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCAmelCase_ : Union[str, Any] = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
hf_unispeech.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowercase__ : str = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 224 |
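The heart of recursively_load_weights above is the MAPPING table, whose '*' wildcards stand for a layer index recovered from the fairseq name. A minimal sketch of that substitution, using one real entry from the table (the helper name is illustrative):
from typing import Optional

MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def rename(fairseq_name: str) -> Optional[str]:
    for key, mapped in MAPPING.items():
        if key in fairseq_name:
            if "*" in mapped:
                # the layer index is the path component just before the matched key
                layer = fairseq_name.split(key)[0].split(".")[-2]
                mapped = mapped.replace("*", layer)
            return mapped
    return None  # unmatched names end up in unused_weights

print(rename("encoder.layers.7.self_attn.k_proj.weight"))
# -> encoder.layers.7.attention.k_proj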
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
| 16 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowercase__ : List[Any] = logging.get_logger(__name__)
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> List[List[ImageInput]]:
if isinstance(__lowerCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__lowerCamelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__lowerCamelCase ):
return [[videos]]
raise ValueError(f'Could not make batched video from {videos}' )
class SCREAMING_SNAKE_CASE (A_ ):
lowerCAmelCase = ["pixel_values"]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_snake_case)
__A : int = size if size is not None else {'''shortest_edge''': 256}
__A : Union[str, Any] = get_size_dict(_snake_case , default_to_square=_snake_case)
__A : List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__A : Optional[Any] = get_size_dict(_snake_case , param_name='crop_size')
__A : List[Any] = do_resize
__A : Optional[int] = size
__A : Union[str, Any] = do_center_crop
__A : int = crop_size
__A : List[str] = resample
__A : int = do_rescale
__A : Tuple = rescale_factor
__A : List[Any] = offset
__A : Optional[int] = do_normalize
__A : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Optional[int] = get_size_dict(_snake_case , default_to_square=_snake_case)
if "shortest_edge" in size:
__A : Optional[int] = get_resize_output_image_size(_snake_case , size['shortest_edge'] , default_to_square=_snake_case)
elif "height" in size and "width" in size:
__A : Optional[Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Dict = get_size_dict(_snake_case)
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
return center_crop(_snake_case , size=(size['height'], size['width']) , data_format=_snake_case , **_snake_case)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : List[Any] = image.astype(np.floataa)
if offset:
__A : List[str] = image - (scale / 2)
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , ):
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.')
# All transformations expect numpy arrays.
__A : Dict = to_numpy_array(_snake_case)
if do_resize:
__A : Union[str, Any] = self.resize(image=_snake_case , size=_snake_case , resample=_snake_case)
if do_center_crop:
__A : Optional[Any] = self.center_crop(_snake_case , size=_snake_case)
if do_rescale:
__A : List[Any] = self.rescale(image=_snake_case , scale=_snake_case , offset=_snake_case)
if do_normalize:
__A : List[Any] = self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case)
__A : List[str] = to_channel_dimension_format(_snake_case , _snake_case)
return image
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__A : int = resample if resample is not None else self.resample
__A : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : str = do_rescale if do_rescale is not None else self.do_rescale
__A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[Any] = offset if offset is not None else self.offset
__A : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
__A : Any = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(_snake_case , default_to_square=_snake_case)
__A : Tuple = crop_size if crop_size is not None else self.crop_size
__A : Union[str, Any] = get_size_dict(_snake_case , param_name='crop_size')
if not valid_images(_snake_case):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
__A : Any = make_batched(_snake_case)
__A : Optional[int] = [
[
self._preprocess_image(
image=_snake_case , do_resize=_snake_case , size=_snake_case , resample=_snake_case , do_center_crop=_snake_case , crop_size=_snake_case , do_rescale=_snake_case , rescale_factor=_snake_case , offset=_snake_case , do_normalize=_snake_case , image_mean=_snake_case , image_std=_snake_case , data_format=_snake_case , )
for img in video
]
for video in videos
]
__A : Dict = {'''pixel_values''': videos}
return BatchFeature(data=_snake_case , tensor_type=_snake_case) | 190 |
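The class above mirrors the ViViT-style video image processor shipped in transformers; assuming that correspondence, a usage sketch looks like this. A video is a list of HWC uint8 frames, and preprocessing returns a (batch, frames, channels, height, width) array:
import numpy as np
from transformers import VivitImageProcessor

# eight random RGB frames of one video, each HWC uint8
video = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]

processor = VivitImageProcessor()  # defaults: shortest_edge 256, center crop 224x224
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)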
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A = logging.get_logger(__name__)
A = {'''vocab_file''': '''vocab.txt'''}
A = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
A = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
A = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class __lowercase ( A_ ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ConvBertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
__a : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _snake_case ) != tokenize_chinese_chars
):
__a : int = getattr(_snake_case , normalizer_state.pop('''type''' ) )
__a : Optional[Any] = do_lower_case
__a : List[str] = strip_accents
__a : Optional[Any] = tokenize_chinese_chars
__a : List[str] = normalizer_class(**_snake_case )
__a : Optional[int] = do_lower_case
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__a : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Tuple = [self.sep_token_id]
__a : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : str = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case ) | 160 |
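The two helpers above implement the BERT-style pair layout: [CLS] A [SEP] B [SEP], with token_type_ids 0 for the first segment and 1 for the second. A standalone demonstration with placeholder ids (101/102 are the conventional [CLS]/[SEP] ids):
CLS, SEP = 101, 102  # conventional BERT special-token ids, used as placeholders

def with_special_tokens(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b:
        out += ids_b + [SEP]
    return out

def token_type_ids(ids_a, ids_b=None):
    first = len([CLS] + ids_a + [SEP]) * [0]
    return first if ids_b is None else first + len(ids_b + [SEP]) * [1]

a, b = [7, 8], [9]
assert with_special_tokens(a, b) == [101, 7, 8, 102, 9, 102]
assert token_type_ids(a, b) == [0, 0, 0, 0, 1, 1]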
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["torch", "torchsde"]
def __init__( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[str] ,*_snake_case : int ,**_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
@classmethod
def UpperCAmelCase ( cls : List[Any] ,*_snake_case : List[Any] ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''torch''', '''torchsde'''] )
| 16 | 0 |
from __future__ import annotations
from random import random
class __UpperCAmelCase :
def __init__( self : str, __A : int | None = None ):
UpperCAmelCase : int = value
UpperCAmelCase : Any = random()
UpperCAmelCase : Node | None = None
UpperCAmelCase : Node | None = None
def __repr__( self : Tuple ):
from pprint import pformat
if self.left is None and self.right is None:
return F'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{F'''{self.value}: {self.prior:.5}''': (self.left, self.right)}, indent=1 )
def __str__( self : Dict ):
UpperCAmelCase : Dict = str(self.value ) + ''' '''
UpperCAmelCase : Dict = str(self.left or '''''' )
UpperCAmelCase : List[str] = str(self.right or '''''' )
return value + left + right
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] ) -> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
UpperCAmelCase : Dict = split(root.left , __lowerCamelCase )
return left, root
else:
UpperCAmelCase : Optional[Any] = split(root.right , __lowerCamelCase )
return root, right
def a__ ( UpperCAmelCase : str , UpperCAmelCase : str ) -> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
UpperCAmelCase : Optional[int] = merge(left.right , __lowerCamelCase )
return left
else:
UpperCAmelCase : str = merge(__lowerCamelCase , right.left )
return right
def a__ ( UpperCAmelCase : Any , UpperCAmelCase : Dict ) -> Node | None:
UpperCAmelCase : Union[str, Any] = Node(__lowerCamelCase )
UpperCAmelCase : Optional[Any] = split(__lowerCamelCase , __lowerCamelCase )
return merge(merge(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Node | None:
UpperCAmelCase : List[str] = split(__lowerCamelCase , value - 1 )
UpperCAmelCase : Tuple = split(__lowerCamelCase , __lowerCamelCase )
return merge(__lowerCamelCase , __lowerCamelCase )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str ) -> Node | None:
for arg in args.split():
if arg[0] == "+":
UpperCAmelCase : Any = insert(__lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
UpperCAmelCase : List[Any] = erase(__lowerCamelCase , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def a__ ( ) -> None:
UpperCAmelCase : str = None
print(
        '''enter numbers to create a tree, +value to add a value to the treap, '''
        '''-value to erase all nodes with that value. \'q\' to quit. ''' )
UpperCAmelCase : Optional[Any] = input()
while args != "q":
UpperCAmelCase : Union[str, Any] = interact_treap(__lowerCamelCase , __lowerCamelCase )
print(__lowerCamelCase )
UpperCAmelCase : Optional[Any] = input()
print('''good by!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 336 |
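A short usage sketch of the treap above, exercising insert, erase, and inorder outside the interactive loop (the values are arbitrary):

# Relies on Node/insert/erase/inorder defined above; inorder prints the sorted
# key order regardless of the random heap priorities.
root = None
for v in [5, 3, 8, 1, 4]:
    root = insert(root, v)
inorder(root)        # prints: 1,3,4,5,8,
print()
root = erase(root, 3)
inorder(root)        # prints: 1,4,5,8,
print()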
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
| 16 | 0 |
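The expected-size arithmetic in the script above is the contract of split_dataset_by_node: each rank gets floor(full_size / world_size) examples, and the first full_size % world_size ranks get one extra, so nothing is dropped or duplicated. A standalone sketch of that rule (pure Python, no datasets dependency):

def expected_sizes(full_size: int, world_size: int) -> list[int]:
    # Rank r gets one extra example iff r < full_size % world_size.
    return [full_size // world_size + int(rank < full_size % world_size) for rank in range(world_size)]


assert expected_sizes(12, 4) == [3, 3, 3, 3]      # NUM_SHARDS * NUM_ITEMS_PER_SHARD = 12
assert expected_sizes(12, 5) == [3, 3, 2, 2, 2]
assert sum(expected_sizes(12, 5)) == 12           # the partition covers the dataset exactly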
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 217 |
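make_linear_from_emb above is ordinary embedding/LM-head weight tying: the decoder's input embedding matrix doubles as the output projection. A toy illustration of the effect (sizes are arbitrary; note the converter builds the Linear with swapped constructor arguments, which is harmless because assigning .data replaces the weight tensor wholesale):

import torch
from torch import nn

emb = nn.Embedding(10, 4)               # vocab_size=10, emb_size=4
lm_head = nn.Linear(4, 10, bias=False)  # hidden states (4) -> vocab logits (10)
lm_head.weight.data = emb.weight.data   # tie: one (10, 4) matrix, two roles

logits = lm_head(torch.randn(2, 4))
assert logits.shape == (2, 10)
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # truly shared storage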
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 16 | 0 |
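Because TapasConfig is a plain PretrainedConfig subclass, every hyperparameter above round-trips through config.json. A small hedged sketch (requires transformers installed; the override values are illustrative, not recommended settings):

from transformers import TapasConfig

config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
config.save_pretrained("./tapas-config")                  # writes ./tapas-config/config.json
reloaded = TapasConfig.from_pretrained("./tapas-config")
assert reloaded.num_aggregation_labels == 4
assert reloaded.model_type == "tapas"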
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 |
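A quick worked example with the usual silicon numbers at T = 300 K, expressed in cm^-3 (the formula only requires the three concentrations to share a unit): Nd = Na = 1e17 and ni = 1.5e10 give V_bi = (kT/q) * ln(Nd*Na / ni^2) ≈ 0.0259 * 31.4 ≈ 0.81 V.

# Illustrative values for a symmetric silicon p-n junction; result is roughly 0.81 V.
v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
print(f"built-in voltage ≈ {v_bi:.2f} V")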
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : int = num_channels
lowercase__ : Any = embed_dim
lowercase__ : int = depths
lowercase__ : Dict = num_heads
lowercase__ : List[Any] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Optional[int] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Dict = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Tuple = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = is_training
lowercase__ : Optional[int] = scope
lowercase__ : str = use_labels
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Union[str, Any] = encoder_stride
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) ,_snake_case )
# Swinv2 has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowercase__ : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape
lowercase__ : int = (
reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
| 16 | 0 |
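The expected-shape arithmetic inside create_and_check_model is standard Swin bookkeeping: each of the len(depths) - 1 stage transitions merges 2x2 patches, quartering the sequence length and doubling the channel width. Reproducing it with the tester's defaults:

# image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1] as in SwinvaModelTester
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]

seq_len = (image_size // patch_size) ** 2            # 256 patches after patch embedding
final_seq_len = seq_len // (4 ** (len(depths) - 1))  # two 2x2 merges: 256 // 16 = 16
final_dim = embed_dim * 2 ** (len(depths) - 1)       # channels double per merge: 64

assert (final_seq_len, final_dim) == (16, 64)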
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
def __init__( self : int , __snake_case : Union[str, Any] , __snake_case : Tuple=12 , __snake_case : Optional[int]=7 , __snake_case : List[Any]=True , __snake_case : int=True , __snake_case : Optional[int]=True , __snake_case : Union[str, Any]=99 , __snake_case : int=32 , __snake_case : int=32 , __snake_case : List[Any]=2 , __snake_case : int=4 , __snake_case : Union[str, Any]=37 , __snake_case : Dict=0.1 , __snake_case : int=0.1 , __snake_case : int=5_12 , __snake_case : Tuple=0.02 , __snake_case : Optional[Any]=0 , __snake_case : Union[str, Any]=None , ) -> Optional[int]:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = projection_dim
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scope
_lowerCAmelCase = bos_token_id
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_lowerCAmelCase = input_mask.numpy()
_lowerCAmelCase = input_mask.shape
_lowerCAmelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
_lowerCAmelCase = 1
_lowerCAmelCase = 0
_lowerCAmelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def lowercase__ ( self : Tuple ) -> str:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : List[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : List[Any] ) -> Dict:
_lowerCAmelCase = TFBlipTextModel(config=_snake_case )
_lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , training=_snake_case )
_lowerCAmelCase = model(_snake_case , training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
def lowercase__ ( self : Any ) -> Any:
_lowerCAmelCase = BlipTextModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def lowercase__ ( self : str ) -> Optional[int]:
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ) -> int:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def lowercase__ ( self : List[Any] ) -> Tuple:
pass
def lowercase__ ( self : List[str] ) -> Tuple:
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def lowercase__ ( self : int ) -> str:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def lowercase__ ( self : Optional[int] ) -> Any:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def lowercase__ ( self : Optional[Any] ) -> Dict:
pass
@slow
def lowercase__ ( self : int ) -> List[Any]:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 70 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author    = {Banerjee, Satanjeev  and Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month     = jun,\n  year      = {2005},\n  address   = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url       = {https://www.aclweb.org/anthology/W05-0909},\n  pages     = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 16 | 0 |
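For orientation, the number single_meteor_score returns combines a recall-weighted harmonic mean of unigram precision/recall with a fragmentation penalty. A hedged sketch of that arithmetic from pre-computed match counts (this mirrors the published METEOR formula, not NLTK's exact internals):

def meteor_from_counts(matches, pred_len, ref_len, chunks, alpha=0.9, beta=3, gamma=0.5):
    precision = matches / pred_len
    recall = matches / ref_len
    f_mean = precision * recall / (alpha * precision + (1 - alpha) * recall)
    penalty = gamma * (chunks / matches) ** beta  # fewer, longer chunks -> smaller penalty
    return f_mean * (1 - penalty)


# e.g. 6 matched unigrams (in 2 contiguous chunks) out of 7 predicted / 8 reference tokens:
print(round(meteor_from_counts(6, 7, 8, 2), 4))  # ~0.75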
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowercase_ = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 303 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 16 | 0 |
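The fairseq offset logic above is easiest to see with concrete ids: SentencePiece reserves ids 0-2 for <unk>/<s>/</s>, while fairseq pins <s>/<pad>/</s>/<unk> to 0-3, so every ordinary piece id shifts up by fairseq_offset = 1. A standalone sketch of _convert_token_to_id's two paths (spm_piece_id stands in for sp_model.PieceToId, where 0 means unknown):

fairseq_offset = 1
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}


def token_to_id(token, spm_piece_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]  # special token: fixed fairseq slot
    return spm_piece_id + fairseq_offset if spm_piece_id else fairseq_tokens_to_ids["<unk>"]


assert token_to_id("<pad>", 0) == 1   # special token keeps its fairseq position
assert token_to_id("▁de", 7) == 8     # ordinary piece: spm id 7 -> vocab id 8
assert token_to_id("zzz", 0) == 3     # unknown piece falls back to <unk>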
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 126 |
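The _LazyModule indirection exists so that importing the package does not eagerly pull in torch/TF/Flax; each submodule is imported the first time one of its attributes is touched. A minimal sketch of the same idea using PEP 562's module-level __getattr__ (a simplification for a package __init__.py, not the transformers implementation):

import importlib

_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}


def __getattr__(name):  # PEP 562: invoked only for names not found normally
    if name in _attr_to_module:
        submodule = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(submodule, name)  # e.g. ElectraConfig triggers one import
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")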
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> Union[str, Any]:
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
lowercase__ : str = timm.create_model('''levit_128s''' , pretrained=__lowerCamelCase )
else:
lowercase__ : Tuple = timm.create_model('''levit_128''' , pretrained=__lowerCamelCase )
if hidden_sizes == 1_92:
lowercase__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=__lowerCamelCase )
if hidden_sizes == 2_56:
lowercase__ : str = timm.create_model('''levit_256''' , pretrained=__lowerCamelCase )
if hidden_sizes == 3_84:
lowercase__ : str = timm.create_model('''levit_384''' , pretrained=__lowerCamelCase )
from_model.eval()
lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval()
lowercase__ : str = OrderedDict()
lowercase__ : int = from_model.state_dict()
lowercase__ : Dict = list(from_model.state_dict().keys() )
lowercase__ : Any = list(our_model.state_dict().keys() )
print(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for i in range(len(__lowerCamelCase ) ):
lowercase__ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowerCamelCase )
lowercase__ : Optional[int] = torch.randn((2, 3, 2_24, 2_24) )
lowercase__ : Optional[int] = from_model(__lowerCamelCase )
lowercase__ : List[Any] = our_model(__lowerCamelCase ).logits
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one."
lowercase__ : Any = name
print(__lowerCamelCase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase__ : int = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> List[Any]:
lowercase__ : Any = '''imagenet-1k-id2label.json'''
lowercase__ : Tuple = 10_00
lowercase__ : Dict = (1, num_labels)
lowercase__ : List[str] = '''huggingface/label-files'''
lowercase__ : str = num_labels
lowercase__ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Union[str, Any] = idalabel
lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()}
lowercase__ : List[Any] = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
lowercase__ : Tuple = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
lowercase__ : Any = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 16 | 0 |
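Note that the conversion above maps weights purely by key position (new_keys[i] takes weights[og_keys[i]]); that only works because timm and transformers happen to enumerate LeViT parameters in the same order, and the torch.allclose check on random input is what actually validates the mapping. The pattern in isolation:

from collections import OrderedDict


def remap_by_position(src_state, dst_keys):
    # Assumes both models enumerate parameters in the same order; always verify
    # model outputs afterwards, since this assert only catches count mismatches.
    src_keys = list(src_state.keys())
    assert len(src_keys) == len(dst_keys), "parameter count mismatch"
    return OrderedDict((dst, src_state[src]) for src, dst in zip(src_keys, dst_keys))


remapped = remap_by_position({"a.w": 1, "a.b": 2}, ["x.weight", "x.bias"])
assert remapped == OrderedDict([("x.weight", 1), ("x.bias", 2)])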
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__lowerCamelCase = """\\n Text data.\n Second line of data."""
__lowerCamelCase = """file"""
@pytest.fixture(scope="session" )
def UpperCamelCase ( __lowerCamelCase : List[str] ):
snake_case : Union[str, Any] = tmp_path_factory.mktemp("data" ) / (FILE_PATH + '''.zstd''')
snake_case : Any = bytes(__lowerCamelCase , "utf-8" )
with zstd.open(__lowerCamelCase , "wb" ) as f:
f.write(__lowerCamelCase )
return path
@pytest.fixture
def UpperCamelCase ( __lowerCamelCase : List[str] ):
with open(os.path.join(tmpfs.local_root_dir , __lowerCamelCase ) , "w" ) as f:
f.write(__lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str ):
snake_case : str = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
snake_case : str = input_paths[compression_format]
snake_case : Tuple = tmp_path / '''cache'''
snake_case : Optional[int] = DownloadConfig(cache_dir=__lowerCamelCase , extract_compressed_file=__lowerCamelCase )
snake_case : Union[str, Any] = cached_path(__lowerCamelCase , download_config=__lowerCamelCase )
with open(__lowerCamelCase ) as f:
snake_case : Dict = f.read()
with open(__lowerCamelCase ) as f:
snake_case : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def UpperCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ):
snake_case : Any = '''custom_cache'''
snake_case : Tuple = '''custom_extracted_dir'''
snake_case : Tuple = tmp_path / '''custom_extracted_path'''
if default_extracted:
snake_case : List[Any] = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , __lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(__lowerCamelCase ) )
snake_case : str = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case : int = xz_file
snake_case : List[Any] = (
DownloadConfig(extract_compressed_file=__lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__lowerCamelCase )
)
snake_case : Union[str, Any] = cached_path(__lowerCamelCase , download_config=__lowerCamelCase )
assert Path(__lowerCamelCase ).parent.parts[-2:] == expected
def UpperCamelCase ( __lowerCamelCase : Optional[int] ):
# absolute path
snake_case : List[Any] = str(Path(__lowerCamelCase ).resolve() )
assert cached_path(__lowerCamelCase ) == text_file
# relative path
snake_case : Union[str, Any] = str(Path(__lowerCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__lowerCamelCase ) == text_file
def UpperCamelCase ( __lowerCamelCase : Any ):
# absolute path
snake_case : Dict = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(__lowerCamelCase ):
cached_path(__lowerCamelCase )
# relative path
snake_case : Dict = '''./__missing_file__.txt'''
with pytest.raises(__lowerCamelCase ):
cached_path(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : List[str] ):
snake_case : List[str] = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(__lowerCamelCase ) as f:
snake_case : int = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCamelCase )
def UpperCamelCase ( ):
with pytest.raises(__lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : str = tmp_path_factory.mktemp("data" ) / '''file.html'''
with pytest.raises(__lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=__lowerCamelCase )
with pytest.raises(__lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Optional[Any] ):
snake_case : List[str] = tmp_path_factory.mktemp("data" ) / '''file.html'''
with pytest.raises(__lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=__lowerCamelCase )
with pytest.raises(__lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Dict ):
snake_case : List[Any] = tmp_path_factory.mktemp("data" ) / '''file.html'''
with pytest.raises(__lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=__lowerCamelCase )
with pytest.raises(__lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : List[str]
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "dict"
lowerCAmelCase : ClassVar[Any] = None
lowerCAmelCase : str = field(default="Translation" ,init=A_ ,repr=A_ )
def __call__( self : List[str] ) -> Any:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : Optional[List] = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "dict"
lowerCAmelCase : ClassVar[Any] = None
lowerCAmelCase : str = field(default="TranslationVariableLanguages" ,init=A_ ,repr=A_ )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = sorted(set(self.languages ) ) if self.languages else None
lowercase__ : Dict = len(self.languages ) if self.languages else None
def __call__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = set(self.languages )
if self.languages and set(_snake_case ) - lang_set:
raise ValueError(
f"""Some languages in example ({", ".join(sorted(set(_snake_case ) - lang_set ) )}) are not in valid set ({", ".join(_snake_case )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowercase__ : str = []
for lang, text in translation_dict.items():
if isinstance(_snake_case ,_snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowercase__ , lowercase__ : Optional[Any] = zip(*sorted(_snake_case ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
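# Usage sketch (hedged): assuming the two dataclasses above mirror
# `datasets.features.Translation` and `TranslationVariableLanguages`, the
# variable-language encoder flattens multi-translation dicts and sorts the
# resulting pairs by language code, e.g.:
#   feature = TranslationVariableLanguages(languages=["en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}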
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
UpperCAmelCase__ : ClassVar[Features] = Features({"image": Image()} )
UpperCAmelCase__ : ClassVar[Features] = Features({"labels": ClassLabel} )
UpperCAmelCase__ : str = "image"
UpperCAmelCase__ : str = "labels"
def _a ( self , A_ ) -> str:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , _snake_case ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
__UpperCamelCase =copy.deepcopy(self )
__UpperCamelCase =self.label_schema.copy()
__UpperCamelCase =features[self.label_column]
__UpperCamelCase =label_schema
return task_template
@property
def _a ( self ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
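# Usage sketch (hedged): assuming this dataclass mirrors
# `datasets.tasks.ImageClassification`, the alignment method above (its
# upstream name is `align_with_features`) deep-copies the template and swaps
# the dataset's actual `ClassLabel` feature into the label schema:
#   template = ImageClassification(image_column="img", label_column="label")
#   aligned = template.align_with_features(dataset.features)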
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]:
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : int = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : str = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : int = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : List[Any] = 2
# Initialize accelerator
lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : str = config['''lr''']
lowercase__ : str = int(config['''num_epochs'''] )
lowercase__ : Optional[int] = int(config['''seed'''] )
lowercase__ : Tuple = int(config['''batch_size'''] )
lowercase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[Any] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : Dict = model(**__lowerCamelCase )
lowercase__ : List[Any] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Any = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
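# Launch sketch (hedged): following the accelerate examples README linked in
# the header comment, this script can be run distributed as
#   accelerate launch this_script.py --mixed_precision fp16
# or single-process for local testing:
#   python this_script.py --cpu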
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class UpperCamelCase__ ( A_ ):
"""simple docstring"""
pass
def UpperCamelCase_ ( lowerCAmelCase__ : Dict ) -> Dict:
"""simple docstring"""
for shard in shards:
for i in range(__lowerCamelCase ):
yield {"i": i, "shard": shard}
def UpperCamelCase_ ( ) -> Tuple:
"""simple docstring"""
lowerCAmelCase_ : int = int(os.environ['RANK'] )
lowerCAmelCase_ : str = int(os.environ['WORLD_SIZE'] )
lowerCAmelCase_ : List[Any] = ArgumentParser()
parser.add_argument('--streaming' , type=__lowerCamelCase )
parser.add_argument('--local_rank' , type=__lowerCamelCase )
parser.add_argument('--num_workers' , type=__lowerCamelCase , default=0 )
lowerCAmelCase_ : int = parser.parse_args()
lowerCAmelCase_ : Optional[Any] = args.streaming
lowerCAmelCase_ : List[Any] = args.num_workers
lowerCAmelCase_ : Optional[Any] = {'''shards''': [f"shard_{shard_idx}" for shard_idx in range(__lowerCamelCase )]}
lowerCAmelCase_ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase )
if not streaming:
lowerCAmelCase_ : int = Dataset.from_list(list(__lowerCamelCase ) )
lowerCAmelCase_ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowerCAmelCase_ : str = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowerCAmelCase_ : str = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
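# Launch sketch (hedged): the script reads RANK and WORLD_SIZE from the
# environment, so it should be started by a launcher that sets them, e.g.
#   torchrun --nproc_per_node=3 this_script.py --streaming True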
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : Optional[int] = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : str = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple:
lowercase__ : List[str] = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def __UpperCAmelCase ( ) -> Optional[int]:
lowercase__ : List[str] = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
lowercase__ : List[Any] = '''imagenet-1k-id2label.json'''
lowercase__ : Optional[Any] = 10_00
lowercase__ : Optional[Any] = '''huggingface/label-files'''
lowercase__ : Dict = num_labels
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
lowercase__ : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : str = {v: k for k, v in idalabel.items()}
lowercase__ : Any = CvtConfig(num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
lowercase__ : int = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
lowercase__ : int = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : List[Any] = [2, 2, 20]
lowercase__ : Any = [3, 12, 16]
lowercase__ : Tuple = [1_92, 7_68, 10_24]
lowercase__ : List[Any] = CvtForImageClassification(__lowerCamelCase )
lowercase__ : str = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
lowercase__ : List[str] = image_size
lowercase__ : Union[str, Any] = torch.load(__lowerCamelCase , map_location=torch.device('''cpu''' ) )
lowercase__ : int = OrderedDict()
lowercase__ : List[Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Any = list_of_state_dict + cls_token(__lowerCamelCase )
lowercase__ : Any = list_of_state_dict + embeddings(__lowerCamelCase )
for cnt in range(config.depth[idx] ):
lowercase__ : Tuple = list_of_state_dict + attention(__lowerCamelCase , __lowerCamelCase )
lowercase__ : List[Any] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__lowerCamelCase )
for i in range(len(__lowerCamelCase ) ):
lowercase__ : Optional[Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
image_processor.save_pretrained(__lowerCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
'''simple docstring'''
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = name
__A : int = value
__A : int = weight
def __repr__( self):
'''simple docstring'''
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.value
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.name
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.weight
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.value / self.weight
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Any , __snake_case : str ) -> List[Any]:
__A : List[Any] = []
for i in range(len(__lowerCamelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Any ) -> Union[str, Any]:
__A : Tuple = sorted(__lowerCamelCase , key=__lowerCamelCase , reverse=__lowerCamelCase )
__A : int = []
    __A , __A : Optional[int] = 0.0, 0.0
for i in range(len(__lowerCamelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
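# Usage sketch (hedged; the builder/greedy function names are obfuscated above,
# so the bindings below are illustrative):
#   menu = build_menu(["Burger", "Pizza", "Cola"], [80, 100, 30], [40, 60, 10])
#   greedy(menu, 60.0, Things.get_value)
#   # -> ([Things(Pizza, 100, 60)], 100.0): Pizza alone fills the cost cap of 60
#   #    with the best value; Burger and Cola no longer fit.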
def _lowerCAmelCase ( ) -> List[str]:
pass
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not number >= 1:
        raise ValueError(
            '''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
lowercase__ : Tuple = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
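# Worked example (hedged; the public function name is obfuscated above):
# starting at number=1 with iterations=7 yields the classic output
#   "1 2 Fizz 4 Buzz Fizz 7 "
# (note the trailing space appended after every number).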
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def __A ( a_ :str , a_ :Any=BITS) -> Optional[Any]:
__a : List[Any] = x.device
__a : List[Any] = (x * 2_55).int().clamp(0 , 2_55)
__a : List[Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__lowerCamelCase)
__a : Tuple = rearrange(__lowerCamelCase , '''d -> d 1 1''')
__a : Optional[int] = rearrange(__lowerCamelCase , '''b c h w -> b c 1 h w''')
__a : Tuple = ((x & mask) != 0).float()
__a : Dict = rearrange(__lowerCamelCase , '''b c d h w -> b (c d) h w''')
__a : Optional[Any] = bits * 2 - 1
return bits
def __A ( a_ :Dict , a_ :str=BITS) -> List[Any]:
__a : str = x.device
__a : Tuple = (x > 0).int()
    __a : Optional[Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=__lowerCamelCase , dtype=torch.int32)
__a : List[Any] = rearrange(__lowerCamelCase , '''d -> d 1 1''')
__a : Tuple = rearrange(__lowerCamelCase , '''b (c d) h w -> b c d h w''' , d=8)
__a : Union[str, Any] = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''')
return (dec / 2_55).clamp(0.0 , 1.0)
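# Round-trip sketch (hedged): `decimal_to_bits` (as called in the pipeline
# below) quantizes images in [0, 1] to 255 levels and expands each channel into
# eight {-1, +1} bit planes; `bits_to_decimal` inverts it up to quantization:
#   x = torch.rand(1, 3, 8, 8)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), x, atol=1 / 255)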
def __A ( self :str , a_ :Union[str, Any] , a_ :Optional[int] , a_ :Dict , a_ :Any = 0.0 , a_ :Optional[Any] = True , a_ :Any=None , a_ :int = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''')
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
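    # Putting the pieces together (hedged restatement of DDIM eq. (12)):
    #   x_{t-1} = sqrt(alpha_{t-1}) * x0_pred
    #             + sqrt(1 - alpha_{t-1} - sigma_t**2) * eps_pred
    #             + sigma_t * z,   with z ~ N(0, I) when eta > 0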
# 1. get previous step value (=t-1)
__a : Union[str, Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__a : Tuple = self.alphas_cumprod[timestep]
__a : Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__a : str = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__a : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__a : Dict = self.bit_scale
if self.config.clip_sample:
__a : Union[str, Any] = torch.clamp(__lowerCamelCase , -scale , __lowerCamelCase)
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__a : Optional[Any] = self._get_variance(__lowerCamelCase , __lowerCamelCase)
__a : Optional[int] = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__a : Any = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__a : Dict = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__a : Tuple = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__a : Union[str, Any] = model_output.device if torch.is_tensor(__lowerCamelCase) else '''cpu'''
__a : Union[str, Any] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=__lowerCamelCase).to(__lowerCamelCase)
__a : Dict = self._get_variance(__lowerCamelCase , __lowerCamelCase) ** 0.5 * eta * noise
__a : List[Any] = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=__lowerCamelCase , pred_original_sample=__lowerCamelCase)
def __A ( self :int , a_ :str , a_ :Union[str, Any] , a_ :Any , a_ :Dict="epsilon" , a_ :Union[str, Any]=None , a_ :str = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
__a : List[str] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__a : Optional[Any] = torch.split(__lowerCamelCase , sample.shape[1] , dim=1)
else:
__a : Optional[int] = None
# 1. compute alphas, betas
__a : Tuple = self.alphas_cumprod[t]
__a : Tuple = self.alphas_cumprod[t - 1] if t > 0 else self.one
__a : Optional[Any] = 1 - alpha_prod_t
__a : Union[str, Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__a : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__a : Optional[int] = model_output
else:
raise ValueError(F"""Unsupported prediction_type {prediction_type}.""")
# 3. Clip "predicted x_0"
__a : Tuple = self.bit_scale
if self.config.clip_sample:
__a : Union[str, Any] = torch.clamp(__lowerCamelCase , -scale , __lowerCamelCase)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__a : Any = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__a : int = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__a : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__a : Any = 0
if t > 0:
__a : List[Any] = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__lowerCamelCase).to(model_output.device)
__a : Dict = (self._get_variance(__lowerCamelCase , predicted_variance=__lowerCamelCase) ** 0.5) * noise
__a : str = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__lowerCamelCase , pred_original_sample=__lowerCamelCase)
class __lowercase ( A_ ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1.0 , ):
super().__init__()
__a : Optional[Any] = bit_scale
__a : Dict = (
ddim_bit_scheduler_step if isinstance(_snake_case , _snake_case ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=_snake_case , scheduler=_snake_case )
@torch.no_grad()
def __call__( self , _UpperCAmelCase = 256 , _UpperCAmelCase = 256 , _UpperCAmelCase = 50 , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , **_UpperCAmelCase , ):
__a : Dict = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=_snake_case , )
__a : Dict = decimal_to_bits(_snake_case ) * self.bit_scale
__a : Optional[Any] = latents.to(self.device )
self.scheduler.set_timesteps(_snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__a : List[Any] = self.unet(_snake_case , _snake_case ).sample
# compute the previous noisy sample x_t -> x_t-1
__a : Any = self.scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample
__a : str = bits_to_decimal(_snake_case )
if output_type == "pil":
__a : str = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=_snake_case )
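# Usage sketch (hedged; the pipeline class name is obfuscated above, so the
# binding below is illustrative, and `unet` is a separately constructed UNet):
#   pipe = BitDiffusionPipeline(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]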
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Optional[Any] = embeddings_size
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : str = depths
lowercase__ : Tuple = is_training
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Optional[Any] = len(_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = TFResNetModel(config=_snake_case )
lowercase__ : List[str] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case )
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ):
lowercase__ : str = model_class(_snake_case )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
from __future__ import annotations
def a__ ( UpperCAmelCase : List[str] , UpperCAmelCase : int = None ) -> list[list[str]]:
UpperCAmelCase : List[str] = word_bank or []
# create a table
UpperCAmelCase : int = len(__lowerCamelCase ) + 1
UpperCAmelCase : list[list[list[str]]] = []
for _ in range(__lowerCamelCase ):
table.append([] )
# seed value
UpperCAmelCase : Union[str, Any] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(__lowerCamelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(__lowerCamelCase )] == word:
UpperCAmelCase : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(__lowerCamelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(__lowerCamelCase )]:
combination.reverse()
return table[len(__lowerCamelCase )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
if "model" in orig_key:
lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
lowercase__ : str = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
lowercase__ : Optional[Any] = '''yoso.''' + orig_key
return orig_key
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowercase__ : Tuple = val
lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias''']
lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict''']
lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase )
lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase )
lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase )
print(model.load_state_dict(__lowerCamelCase ) )
model.eval()
model.save_pretrained(__lowerCamelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class snake_case :
def __init__( self : Dict , UpperCamelCase__ : Dict)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = str(id_)
__lowerCAmelCase: Tuple = None
__lowerCAmelCase: Dict = None
__lowerCAmelCase: Dict = []
__lowerCAmelCase: int = {} # {vertex:distance}
def __lt__( self : List[Any] , UpperCamelCase__ : int)-> Any:
'''simple docstring'''
return self.key < other.key
def __repr__( self : Tuple)-> List[str]:
'''simple docstring'''
return self.id
def lowercase_ ( self : Dict , UpperCamelCase__ : Optional[Any])-> Any:
'''simple docstring'''
self.neighbors.append(_snake_case)
def lowercase_ ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Any = weight
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __lowerCamelCase )
graph[b - 1].add_edge(graph[a - 1] , __lowerCamelCase )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list:
__lowerCAmelCase: List[Any] = []
for u in graph:
__lowerCAmelCase: str = math.inf
__lowerCAmelCase: Optional[Any] = None
__lowerCAmelCase: Union[str, Any] = 0
__lowerCAmelCase: Tuple = graph[:]
while q:
__lowerCAmelCase: int = min(__lowerCamelCase )
q.remove(__lowerCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__lowerCAmelCase: List[Any] = u
__lowerCAmelCase: Tuple = u.edges[v.id]
for i in range(1 , len(__lowerCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Iterator[tuple]:
for u in graph:
__lowerCAmelCase: List[str] = math.inf
__lowerCAmelCase: Any = None
__lowerCAmelCase: Dict = 0
__lowerCAmelCase: Any = list(__lowerCamelCase )
hq.heapify(__lowerCamelCase )
while h:
__lowerCAmelCase: List[str] = hq.heappop(__lowerCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__lowerCAmelCase: str = u
__lowerCAmelCase: Optional[int] = u.edges[v.id]
hq.heapify(__lowerCamelCase )
for i in range(1 , len(__lowerCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def a__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
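# Usage sketch (hedged; class/function names are obfuscated above, so the
# bindings below are illustrative): with g = [Vertex(i) for i in range(3)]
# (string ids "0".."2"), connect(g, 1, 2, 15) and connect(g, 2, 3, 4) add
# undirected weighted edges by 1-based position; prim(g, g[0]) then returns the
# MST as 1-indexed (vertex, parent) pairs: [(2, 1), (3, 2)].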
"""simple docstring"""
import os
def __UpperCAmelCase ( ) -> int:
with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file:
lowercase__ : List[Any] = str(file.readlines()[0] )
lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase__ : int = 0
lowercase__ : Optional[Any] = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowercase__ : List[str] = 0
return total_score
if __name__ == "__main__":
print(solution())
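# Worked example from the Project Euler 22 statement: "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53 and, at sorted position 938, contributes
# 938 * 53 = 49714 to the total.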
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : List[str]=None ):
# Recurse if needed
if "." in tensor_name:
__a : List[str] = tensor_name.split('.' )
for split in splits[:-1]:
__a : List[Any] = getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
__a : Optional[int] = new_module
__a : Tuple = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
__a : List[Any] = tensor_name in module._buffers
__a : Any = getattr(__lowerCamelCase , __lowerCamelCase )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
__a : Any = False
__a : List[str] = False
if is_buffer or not is_bitsandbytes_available():
__a : Union[str, Any] = False
__a : str = False
else:
        __a : Any = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        __a : int = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
__a : Tuple = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
__a : List[Any] = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
__a : str = value.to('cpu' )
                if value.dtype == torch.int8:
                    __a : str = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
                        '0.37.2' )
                    if not is_8bit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
__a : Tuple = torch.tensor(__lowerCamelCase , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , __lowerCamelCase ) and fp16_statistics is None:
__a : str = new_value.T
__a : str = old_value.__dict__
            if is_8bit:
                __a : Union[str, Any] = bnb.nn.Int8Params(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
            elif is_4bit:
                __a : Optional[int] = bnb.nn.Params4bit(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
__a : Tuple = new_value
            if fp16_statistics is not None:
                setattr(module.weight , 'SCB' , fp16_statistics.to(__lowerCamelCase ) )
else:
if value is None:
__a : Union[str, Any] = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
__a : List[Any] = value.to(__lowerCamelCase )
else:
__a : Optional[Any] = torch.tensor(__lowerCamelCase , device=__lowerCamelCase )
if is_buffer:
__a : Tuple = new_value
else:
__a : List[Any] = nn.Parameter(__lowerCamelCase , requires_grad=old_value.requires_grad )
__a : int = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
# For backward compatibility
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


# For backward compatibility
def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
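
# Illustrative sketch (an addition): the typical order of operations when
# combining the helpers above. `quantization_config` is assumed to be a
# `BitsAndBytesConfig`-like object supplied by the caller.
def _example_quantize(model, quantization_config):
    # Keep tied/head modules (e.g. `lm_head`) in full precision, then swap the
    # remaining nn.Linear layers for bitsandbytes 8-bit/4-bit layers.
    modules_to_not_convert = get_keys_to_not_convert(model)
    return replace_with_bnb_linear(
        model, modules_to_not_convert=modules_to_not_convert, quantization_config=quantization_config
    )
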
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
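
# Usage sketch (illustrative addition, not part of the original module): the
# pipeline is normally constructed through the `pipeline` factory; the CLIP
# checkpoint here is one common choice, and "cat.png" is a hypothetical path.
def _example_zero_shot_call():
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    return classifier("cat.png", candidate_labels=["cat", "dog"])
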
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
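
# Illustrative sketch (an addition, not part of the original tests): how the
# helper under test is typically used to build a smaller student model.
def _example_make_student():
    student, *_ = create_student_by_copying_alternating_layers(
        TINY_BART, tempfile.mkdtemp(), e=1, d=1  # keep 1 encoder and 1 decoder layer
    )
    return student
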
"""simple docstring"""
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
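
# Small self-contained demo (illustrative addition): on a 3-vertex graph the
# indirect route 0 -> 1 -> 2 (cost 1 + 2 = 3) beats the direct edge (cost 10).
def _demo() -> None:
    inf = float("inf")
    demo_graph = [
        [0.0, 1.0, 10.0],
        [inf, 0.0, 2.0],
        [inf, inf, 0.0],
    ]
    shortest, _ = floyd_warshall(demo_graph, 3)
    assert shortest[0][2] == 3.0
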
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
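
# Usage sketch (illustrative addition, not part of the original module): the
# generated auto classes dispatch on the checkpoint's config type.
def _example_auto_loading():
    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    seq2seq = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-base")
    return model, seq2seq
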
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
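
# Migration sketch (illustrative addition): both spellings load the same
# processor; only the class name differs, the old one emitting a FutureWarning.
def _example_migration():
    old = MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-small")
    new = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
    return old, new
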
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
    main()
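
# Quick sanity check (illustrative addition): both implementations agree.
# 252 = 2^2 * 3^2 * 7 and 105 = 3 * 5 * 7, so the gcd is 21.
def _check_gcd_agreement() -> None:
    assert euclidean_gcd(252, 105) == euclidean_gcd_recursive(252, 105) == 21
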
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
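
# Note on the lazy structure above (illustrative addition): the heavy modeling
# files are only imported on first attribute access, so e.g.
#
#     from transformers.models.xglm import XGLMConfig
#
# pulls in `configuration_xglm` without importing the torch/TF/Flax modules.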
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
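
# Usage sketch (illustrative addition, not part of the original tests): the
# cross-framework loading these tests exercise, outside the unittest harness.
def _example_cross_loading():
    tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PyTorch weights -> TF
    pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)  # TF weights -> PyTorch
    return tf_model, pt_model
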
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> List[Any]:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ) -> Union[str, Any]:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_2( self ) -> str:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
    def test_stable_diffusion_karras_sigmas( self ) -> int:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_dpmpp_2m' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
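# Note: use_karras_sigmas=True in the last test swaps the sampler's default sigma schedule
# for the Karras et al. (2022) schedule, which concentrates steps at lower noise levels.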
| 62 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the tilings of a row of ``length`` units, summed over the three tile lengths.

    different_colour_ways_number[n][t - 2] holds the number of ways to fill a row of
    length n using at least one tile of length t (t = 2, 3, 4).
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "wavlm"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , num_buckets=320 , max_bucket_distance=800 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
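    # Usage sketch: with the default conv_stride (5, 2, 2, 2, 2, 2, 2) this property
    # evaluates to 5 * 2**6 = 320, i.e. each encoder output frame covers 320 waveform samples.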
| 224 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
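        # debug_launcher runs the wrapped function in multiple CPU processes (two by default),
        # letting these distributed smoke tests execute on machines without accelerators.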
| 16 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'''
_KWARGS_DESCRIPTION = '''\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'''
_CITATION = '''\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric ):
    def _info( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , reference_urls=[] , )
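    # `_compute` below is what `metric.compute(predictions=..., references=...)` dispatches
    # to: regex stripping runs first, then the optional case/punctuation/number filters.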
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        '''simple docstring'''
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '' , x) for x in predictions])
                references = np.array([re.sub(s , '' , x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('' , '' , string.punctuation)
            predictions = np.char.translate(predictions , table=repl_table)
            references = np.char.translate(references , table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans('' , '' , string.digits)
            predictions = np.char.translate(predictions , table=repl_table)
            references = np.char.translate(references , table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 190 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speecht5'] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
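    # The _LazyModule registered above defers the heavy torch/sentencepiece imports until
    # an attribute such as `SpeechT5Model` is first accessed on the package.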
| 16 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 160 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["torch", "torchsde"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self ,['''torch''', '''torchsde'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls ,['''torch''', '''torchsde'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls ,['''torch''', '''torchsde'''] )
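    # Dummy objects like this keep `from diffusers import DPMSolverSDEScheduler` importable
    # when torch/torchsde are absent; any use raises an ImportError naming the missing backends.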
| 16 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        return model
@property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config )
@property
    def dummy_extractor( self ):
        def extract(*args, **kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )
                def to( self, device ):
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
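    # The fixtures above build tiny randomly initialised UNet/VAE/CLIP components so the
    # pipeline wiring can be tested in milliseconds; `dummy_extractor` stands in for the
    # CLIP feature extractor, returning an object that only exposes `pixel_values`.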
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : List[Any] = self.dummy_cond_unet
UpperCAmelCase : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''', clip_sample=_snake_case, set_alpha_to_one=_snake_case, )
UpperCAmelCase : str = self.dummy_vae
UpperCAmelCase : int = self.dummy_text_encoder
UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
UpperCAmelCase : str = StableDiffusionPipeline(
unet=_snake_case, scheduler=_snake_case, vae=_snake_case, text_encoder=_snake_case, tokenizer=_snake_case, safety_checker=_snake_case, feature_extractor=self.dummy_extractor, )
UpperCAmelCase : Dict = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger'''
UpperCAmelCase : List[Any] = torch.Generator(device=_snake_case ).manual_seed(0 )
UpperCAmelCase : int = sd_pipe([prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' )
UpperCAmelCase : Any = output.images
UpperCAmelCase : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(0 )
UpperCAmelCase : int = sd_pipe(
[prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=_snake_case, )[0]
UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
UpperCAmelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase : str = self.dummy_vae
UpperCAmelCase : int = self.dummy_text_encoder
UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
UpperCAmelCase : Optional[int] = StableDiffusionPipeline(
unet=_snake_case, scheduler=_snake_case, vae=_snake_case, text_encoder=_snake_case, tokenizer=_snake_case, safety_checker=_snake_case, feature_extractor=self.dummy_extractor, )
UpperCAmelCase : Any = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase : int = '''A painting of a squirrel eating a burger'''
UpperCAmelCase : str = torch.Generator(device=_snake_case ).manual_seed(0 )
UpperCAmelCase : Optional[Any] = sd_pipe([prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' )
UpperCAmelCase : int = output.images
UpperCAmelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(0 )
UpperCAmelCase : int = sd_pipe(
[prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=_snake_case, )[0]
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase : int = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Any ):
UpperCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''', safety_checker=_snake_case )
assert isinstance(_snake_case, _snake_case )
assert isinstance(pipe.scheduler, _snake_case )
assert pipe.safety_checker is None
UpperCAmelCase : Union[str, Any] = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_snake_case )
UpperCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(_snake_case )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase : str = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' )
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = self.dummy_cond_unet
UpperCAmelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase : int = self.dummy_vae
UpperCAmelCase : List[str] = self.dummy_text_encoder
UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
UpperCAmelCase : Dict = unet.half()
UpperCAmelCase : Any = vae.half()
UpperCAmelCase : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase : Union[str, Any] = StableDiffusionPipeline(
unet=_snake_case, scheduler=_snake_case, vae=_snake_case, text_encoder=_snake_case, tokenizer=_snake_case, safety_checker=_snake_case, feature_extractor=self.dummy_extractor, )
UpperCAmelCase : str = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase : Any = '''A painting of a squirrel eating a burger'''
UpperCAmelCase : Union[str, Any] = sd_pipe([prompt], num_inference_steps=2, output_type='''np''' ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : int = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=_snake_case )
UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase : str = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase : Optional[int] = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
UpperCAmelCase : List[Any] = 4_0_0_3_6_6_0_3_4_6
UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase : Optional[Any] = torch.manual_seed(_snake_case )
UpperCAmelCase : str = sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=0, )
UpperCAmelCase : Dict = output.images
UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
UpperCAmelCase : Optional[int] = torch.manual_seed(_snake_case )
UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.0_2_5, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
UpperCAmelCase : List[Any] = output.images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : Optional[Any] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Any ):
UpperCAmelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=_snake_case )
UpperCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase : Optional[Any] = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase : List[Any] = '''padme amidala taking a bath artwork, safe for work, no nudity'''
UpperCAmelCase : Tuple = 2_7_3_4_9_7_1_7_5_5
UpperCAmelCase : List[str] = 7
UpperCAmelCase : Dict = torch.manual_seed(_snake_case )
UpperCAmelCase : Union[str, Any] = sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=0, )
UpperCAmelCase : str = output.images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
UpperCAmelCase : Tuple = torch.manual_seed(_snake_case )
UpperCAmelCase : Union[str, Any] = sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.0_2_5, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
UpperCAmelCase : int = output.images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : Optional[int] = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
UpperCAmelCase : int = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase : Optional[int] = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
UpperCAmelCase : List[Any] = 1_0_4_4_3_5_5_2_3_4
UpperCAmelCase : Optional[int] = 1_2
UpperCAmelCase : List[Any] = torch.manual_seed(_snake_case )
UpperCAmelCase : Dict = sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=0, )
UpperCAmelCase : str = output.images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase : List[str] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
UpperCAmelCase : Optional[int] = torch.manual_seed(_snake_case )
UpperCAmelCase : Any = sd_pipe(
[prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.0_2_5, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
UpperCAmelCase : Dict = output.images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : Dict = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
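        # In these nightly tests, sld_guidance_scale=0 disables Safe Latent Diffusion entirely,
        # while the strong configuration (scale 2000, warmup 7, threshold 0.025, momentum 0.5,
        # beta 0.7) steers sampling away from the unsafe concepts targeted by the checkpoint.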
| 336 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError( RuntimeError ):
    '''simple docstring'''
    pass
def gen(shards: List[str] ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ['''RANK'''] )
    world_size = int(os.environ['''WORLD_SIZE'''] )
    parser = ArgumentParser()
    parser.add_argument('''--streaming''' , type=bool )
    parser.add_argument('''--local_rank''' , type=int )
    parser.add_argument('''--num_workers''' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
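    # Launched under torchrun, each rank iterates only its node's split: with 4 shards of 3
    # items and world_size=2, every rank is expected to see exactly 6 examples.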
if __name__ == "__main__":
main()
| 16 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
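# Self-Attention Guidance (SAG) perturbs the regions the UNet attends to and uses the
# difference as extra guidance; `sag_scale` sets its strength on top of classifier-free
# guidance. Minimal usage sketch (assumes a GPU and Hub access):
#   pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#   image = pipe("a photo of a cat", guidance_scale=7.5, sag_scale=0.75).images[0]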
class StableDiffusionSAGPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self )-> List[str]:
'''simple docstring'''
torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 )-> List[Any]:
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
    def test_inference_batch_single_identical( self )-> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    def tearDown( self )-> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_1( self )-> Dict:
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '''.'''
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2( self )-> Any:
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '''.'''
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square( self )-> Optional[Any]:
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '''.'''
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 217 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "tapas"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels ,dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
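        # Keys of `aggregation_labels` arrive as strings after a JSON round-trip of the
        # config, so they are normalised back to int here (e.g. {"0": "NONE"} -> {0: "NONE"}).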
| 16 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ):
'''simple docstring'''
super().setUp()
__a : Optional[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
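        # The toy WordPiece vocab above is enough to tokenize deterministically, e.g.
        # "UNwant\u00E9d,running" -> ['un', '##want', '##ed', ',', 'runn', '##ing'] (ids [7, 4, 5, 10, 8, 9]).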
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
'''simple docstring'''
__a : Dict = '''UNwant\u00E9d,running'''
__a : Optional[Any] = '''unwanted, running'''
return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 27 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
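# SwinvaModelTester below builds a tiny Swin Transformer V2 setup (32x32 inputs, patch size 2,
# embed_dim 16, depths [1, 2, 1]) so the shared ModelTesterMixin checks stay fast on CPU.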
class __A :
'''simple docstring'''
    def __init__( self ,parent ,batch_size=13 ,image_size=32 ,patch_size=2 ,num_channels=3 ,embed_dim=16 ,depths=[1, 2, 1] ,num_heads=[2, 2, 4] ,window_size=2 ,mlp_ratio=2.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,patch_norm=True ,initializer_range=0.02 ,layer_norm_eps=1e-5 ,is_training=True ,scope=None ,use_labels=True ,type_sequence_label_size=10 ,encoder_stride=8 ,) -> Union[str, Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs( self ) -> Any:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ) -> Optional[int]:
        """simple docstring"""
        model = SwinvaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
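        # With the defaults above: (32/2)**2 = 256 patches, reduced by 4**(3-1) = 16 after two
        # patch-merge stages -> 16 tokens, while the hidden width grows to embed_dim * 2**2 = 64.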
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase : Optional[int] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) ,_snake_case )
# Swinv2 has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowercase__ : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape
lowercase__ : int = (
reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
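# --- Illustrative sketch (not part of the test class above) ---
# The padding test above grows each spatial dimension to the next multiple of the
# patch size. This hypothetical standalone helper mirrors that exact arithmetic;
# note it adds a full extra patch even when the size is already divisible, which
# is precisely what the padded_height/padded_width expressions compute.
def pad_to_patch_multiple(image_size, patch_size):
    padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
    padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
    return padded_height, padded_width

assert pad_to_patch_multiple((224, 224), (4, 4)) == (228, 228)
assert pad_to_patch_multiple((225, 227), (4, 4)) == (228, 228)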
| 16 | 0 |
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
A__ : int ={
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
A__ : Any ={
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = list(s_dict.keys() )
for key in keys:
_lowerCAmelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_lowerCAmelCase = new_key.replace(__lowerCamelCase , __lowerCamelCase )
print(f"{key} -> {new_key}" )
_lowerCAmelCase = s_dict.pop(__lowerCamelCase )
return s_dict
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = emb.weight.shape
_lowerCAmelCase = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_lowerCAmelCase = emb.weight.data
return lin_layer
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = os.path.basename(__lowerCamelCase )
_lowerCAmelCase = url.split("""/""" )[-2]
_lowerCAmelCase = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.exists(__lowerCamelCase ) and not os.path.isfile(__lowerCamelCase ):
raise RuntimeError(f"{download_target} exists and is not a regular file" )
if os.path.isfile(__lowerCamelCase ):
_lowerCAmelCase = open(__lowerCamelCase , """rb""" ).read()
if hashlib.shaaaa(__lowerCamelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(__lowerCamelCase ) as source, open(__lowerCamelCase , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=__lowerCamelCase , unit_divisor=10_24 ) as loop:
while True:
_lowerCAmelCase = source.read(81_92 )
if not buffer:
break
output.write(__lowerCamelCase )
loop.update(len(__lowerCamelCase ) )
_lowerCAmelCase = open(__lowerCamelCase , """rb""" ).read()
if hashlib.shaaaa(__lowerCamelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if ".pt" not in checkpoint_path:
_lowerCAmelCase = _download(_MODELS[checkpoint_path] )
else:
_lowerCAmelCase = torch.load(__lowerCamelCase , map_location="""cpu""" )
_lowerCAmelCase = original_checkpoint['''dims''']
_lowerCAmelCase = original_checkpoint['''model_state_dict''']
_lowerCAmelCase = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(__lowerCamelCase )
rename_keys(__lowerCamelCase )
_lowerCAmelCase = True
_lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
_lowerCAmelCase = WhisperConfig(
    vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=__lowerCamelCase , decoder_ffn_dim=__lowerCamelCase , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
_lowerCAmelCase = WhisperForConditionalGeneration(__lowerCamelCase )
_lowerCAmelCase = model.model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
if len(__lowerCamelCase ) > 0 and not set(__lowerCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f" but all the following weights are missing {missing}" )
if tie_embeds:
_lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_lowerCAmelCase = proj_out_weights
model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
A__ : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
A__ : List[Any] =parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
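# --- Hedged usage sketch for the renaming strategy above ---
# rename_keys() rewrites checkpoint keys by substring replacement, applying every
# matching WHISPER_MAPPING entry in order. The toy mapping and key below are
# illustrative stand-ins, not taken from a real checkpoint.
def _toy_rename(state_dict, mapping):
    renamed = {}
    for key, value in state_dict.items():
        new_key = key
        for old, new in mapping.items():
            if old in new_key:
                new_key = new_key.replace(old, new)
        renamed[new_key] = value
    return renamed

assert _toy_rename(
    {"encoder.blocks.0.mlp.0.weight": 0}, {"blocks": "layers", "mlp.0": "fc1"}
) == {"encoder.layers.0.fc1.weight": 0}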
| 70 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict:
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]:
"""simple docstring"""
if NLTK_VERSION >= version.Version('''3.6.5''' ):
lowercase__ : int = [
meteor_score.single_meteor_score(
word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case )
for ref, pred in zip(_snake_case ,_snake_case )
]
else:
lowercase__ : Tuple = [
meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case )
for ref, pred in zip(_snake_case ,_snake_case )
]
return {"meteor": np.mean(_snake_case )}
| 16 | 0 |
from ...configuration_utils import PretrainedConfig
lowercase_ = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class __UpperCamelCase ( A_ ):
"""simple docstring"""
lowerCAmelCase_ = "tapas"
def __init__( self : List[Any] , _A : Dict=3_0522 , _A : Union[str, Any]=768 , _A : int=12 , _A : Union[str, Any]=12 , _A : Union[str, Any]=3072 , _A : List[Any]="gelu" , _A : Optional[int]=0.1 , _A : Tuple=0.1 , _A : List[Any]=1024 , _A : Any=[3, 256, 256, 2, 256, 256, 10] , _A : List[Any]=0.02 , _A : Union[str, Any]=1e-12 , _A : str=0 , _A : Any=10.0 , _A : int=0 , _A : Optional[Any]=1.0 , _A : List[str]=None , _A : Tuple=1.0 , _A : Tuple=False , _A : List[Any]=None , _A : int=1.0 , _A : List[Any]=1.0 , _A : Optional[int]=False , _A : Optional[int]=False , _A : Optional[int]="ratio" , _A : Any=None , _A : Union[str, Any]=None , _A : List[str]=64 , _A : Optional[Any]=32 , _A : Optional[Any]=False , _A : Optional[int]=True , _A : Dict=False , _A : Tuple=False , _A : int=True , _A : List[str]=False , _A : Dict=None , _A : Optional[int]=None , **_A : int , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_size
__SCREAMING_SNAKE_CASE : Any = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : str = max_position_embeddings
__SCREAMING_SNAKE_CASE : Dict = type_vocab_sizes
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
# Fine-tuning task hyperparameters
__SCREAMING_SNAKE_CASE : Any = positive_label_weight
__SCREAMING_SNAKE_CASE : int = num_aggregation_labels
__SCREAMING_SNAKE_CASE : List[str] = aggregation_loss_weight
__SCREAMING_SNAKE_CASE : Optional[int] = use_answer_as_supervision
__SCREAMING_SNAKE_CASE : Optional[Any] = answer_loss_importance
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_normalized_answer_loss
__SCREAMING_SNAKE_CASE : str = huber_loss_delta
__SCREAMING_SNAKE_CASE : str = temperature
__SCREAMING_SNAKE_CASE : int = aggregation_temperature
__SCREAMING_SNAKE_CASE : List[Any] = use_gumbel_for_cells
__SCREAMING_SNAKE_CASE : Tuple = use_gumbel_for_aggregation
__SCREAMING_SNAKE_CASE : Union[str, Any] = average_approximation_function
__SCREAMING_SNAKE_CASE : Union[str, Any] = cell_selection_preference
__SCREAMING_SNAKE_CASE : Any = answer_loss_cutoff
__SCREAMING_SNAKE_CASE : List[Any] = max_num_rows
__SCREAMING_SNAKE_CASE : str = max_num_columns
__SCREAMING_SNAKE_CASE : int = average_logits_per_cell
__SCREAMING_SNAKE_CASE : str = select_one_column
__SCREAMING_SNAKE_CASE : str = allow_empty_column_selection
__SCREAMING_SNAKE_CASE : Any = init_cell_selection_weights_to_zero
__SCREAMING_SNAKE_CASE : Optional[int] = reset_position_index_per_cell
__SCREAMING_SNAKE_CASE : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__SCREAMING_SNAKE_CASE : Optional[Any] = aggregation_labels
__SCREAMING_SNAKE_CASE : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _snake_case ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_snake_case ): v for k, v in aggregation_labels.items()}
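# --- Small sketch of the key-normalization step at the end of __init__ above ---
# json serializes integer dict keys as strings, so aggregation_labels loaded back
# from a config file need the keys cast to int. Illustrative label values only.
import json

_serialized = json.dumps({0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"})
_loaded = json.loads(_serialized)  # keys come back as strings: {'0': 'NONE', ...}
_aggregation_labels = {int(k): v for k, v in _loaded.items()}
assert _aggregation_labels[1] == "SUM"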
| 303 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
lowerCAmelCase_ = {
'facebook/xglm-564M': 2_048,
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = ["input_ids", "attention_mask"]
def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None:
"""simple docstring"""
lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase__ : Any = 7
lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
lowercase__ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Optional[int] = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase__ : List[str] = len(self.sp_model )
lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_snake_case )
lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = self.__dict__.copy()
lowercase__ : Optional[int] = None
lowercase__ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : int = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowercase__ : Dict = {}
lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase__ : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is None:
return [1] + ([0] * len(_snake_case ))
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case ))
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : List[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_snake_case ,out_type=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(_snake_case ,''' ''' ).strip()
return out_string
def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Any = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case ,'''wb''' ) as fi:
lowercase__ : Dict = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
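# --- Standalone sketch of the fairseq/spm id alignment documented above ---
# Real sentencepiece ids are shifted by fairseq_offset (1) while the four control
# tokens keep fixed ids; spm id 0 (its own <unk>) falls back to the fairseq unk id.
# The tiny vocab below is hypothetical.
_fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_toy_spm_ids = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}
_fairseq_offset = 1

def _toy_token_to_id(token):
    if token in _fairseq_tokens_to_ids:
        return _fairseq_tokens_to_ids[token]
    spm_id = _toy_spm_ids.get(token, 0)
    return spm_id + _fairseq_offset if spm_id else _fairseq_tokens_to_ids["<unk>"]

assert _toy_token_to_id(",") == 4   # spm position 3 + offset 1, matching the table above
assert _toy_token_to_id("xyz") == 3  # unknown piece -> fairseq <unk>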
| 16 | 0 |
"""simple docstring"""
def lowerCAmelCase_ ( snake_case_ : int ) ->float:
lowerCamelCase__ : Optional[int] =0
while len(__lowerCamelCase ) > 1:
lowerCamelCase__ : Optional[int] =0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
lowerCamelCase__ : List[Any] =files.index(min(__lowerCamelCase ) )
temp += files[min_index]
files.pop(__lowerCamelCase )
files.append(__lowerCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
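# A heap-based variant of the same greedy merge, shown as a follow-up sketch: using a
# min-heap avoids the repeated linear scans above and runs in O(n log n). Note that
# heapify mutates the input list in place.
import heapq

def optimal_merge_pattern_heap(files: list) -> float:
    heapq.heapify(files)
    total = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)
        total += merged
        heapq.heappush(files, merged)
    return total

assert optimal_merge_pattern_heap([2, 3, 4]) == 14  # 2+3=5, then 5+4=9; cost 5+9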
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 126 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ) -> Union[str, Any]:
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
lowercase__ : str = timm.create_model('''levit_128s''' , pretrained=__lowerCamelCase )
else:
lowercase__ : Tuple = timm.create_model('''levit_128''' , pretrained=__lowerCamelCase )
if hidden_sizes == 1_92:
lowercase__ : Union[str, Any] = timm.create_model('''levit_192''' , pretrained=__lowerCamelCase )
if hidden_sizes == 2_56:
lowercase__ : str = timm.create_model('''levit_256''' , pretrained=__lowerCamelCase )
if hidden_sizes == 3_84:
lowercase__ : str = timm.create_model('''levit_384''' , pretrained=__lowerCamelCase )
from_model.eval()
lowercase__ : Optional[int] = LevitForImageClassificationWithTeacher(__lowerCamelCase ).eval()
lowercase__ : str = OrderedDict()
lowercase__ : int = from_model.state_dict()
lowercase__ : Dict = list(from_model.state_dict().keys() )
lowercase__ : Any = list(our_model.state_dict().keys() )
print(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for i in range(len(__lowerCamelCase ) ):
lowercase__ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowerCamelCase )
lowercase__ : Optional[int] = torch.randn((2, 3, 2_24, 2_24) )
lowercase__ : Optional[int] = from_model(__lowerCamelCase )
lowercase__ : List[Any] = our_model(__lowerCamelCase ).logits
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one."
lowercase__ : Any = name
print(__lowerCamelCase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase__ : int = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ) -> List[Any]:
lowercase__ : Any = '''imagenet-1k-id2label.json'''
lowercase__ : Tuple = 10_00
lowercase__ : Dict = (1, num_labels)
lowercase__ : List[str] = '''huggingface/label-files'''
lowercase__ : str = num_labels
lowercase__ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Union[str, Any] = idalabel
lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()}
lowercase__ : List[Any] = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
lowercase__ : Tuple = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
lowercase__ : Any = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
    help='The name of the model you wish to convert; it must be one of the supported Levit* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
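# --- Cautionary sketch of the by-position weight copy in convert_weight_and_push ---
# Copying weights by enumeration order only works when both models list parameters
# in the same order; a per-slot shape check is a cheap guard. Toy modules only.
import torch
from collections import OrderedDict
from torch import nn

_src, _dst = nn.Linear(4, 2), nn.Linear(4, 2)
_new_state = OrderedDict()
_src_state, _dst_state = _src.state_dict(), _dst.state_dict()
for _src_key, _dst_key in zip(_src_state.keys(), _dst_state.keys()):
    assert _src_state[_src_key].shape == _dst_state[_dst_key].shape
    _new_state[_dst_key] = _src_state[_src_key]
_dst.load_state_dict(_new_state)
assert torch.equal(_src.weight, _dst.weight)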
| 16 | 0 |
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] ):
snake_case : Optional[Any] = generate_pascal_triangle(__lowerCamelCase )
for row_idx in range(__lowerCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def UpperCamelCase ( __lowerCamelCase : str ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError("The input value of \'num_rows\' should be \'int\'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of \'num_rows\' should be greater than or equal to 0" )
snake_case : list[list[int]] = []
for current_row_idx in range(__lowerCamelCase ):
snake_case : int = populate_current_row(__lowerCamelCase , __lowerCamelCase )
triangle.append(__lowerCamelCase )
return triangle
def UpperCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] ):
snake_case : Union[str, Any] = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
snake_case : Tuple = 1, 1
for current_col_idx in range(1 , __lowerCamelCase ):
calculate_current_element(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return current_row
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , ):
snake_case : Optional[Any] = triangle[current_row_idx - 1][current_col_idx - 1]
snake_case : Dict = triangle[current_row_idx - 1][current_col_idx]
snake_case : Dict = above_to_left_elt + above_to_right_elt
def UpperCamelCase ( __lowerCamelCase : int ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError("The input value of \'num_rows\' should be \'int\'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of \'num_rows\' should be greater than or equal to 0" )
snake_case : list[list[int]] = [[1]]
for row_index in range(1 , __lowerCamelCase ):
snake_case : Any = [0] + result[-1] + [0]
snake_case : List[Any] = row_index + 1
# Calculate the number of distinct elements in a row
snake_case : List[str] = sum(divmod(__lowerCamelCase , 2 ) )
snake_case : Dict = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
snake_case : List[Any] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
snake_case : List[Any] = row_first_half + row_second_half
result.append(__lowerCamelCase )
return result
def UpperCamelCase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ) -> None:
snake_case : str = f"""{func.__name__}({value})"""
snake_case : Optional[int] = timeit(f"""__main__.{call}""" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCamelCase , __lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
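# --- Independent verification sketch ---
# Row n (0-indexed) of Pascal's triangle is the binomial coefficients C(n, k), so
# math.comb provides a generator-agnostic cross-check of the functions above.
from math import comb

def _pascal_row(n: int) -> list:
    return [comb(n, k) for k in range(n + 1)]

assert _pascal_row(4) == [1, 4, 6, 4, 1]  # the 5th row produced by the generators above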
| 59 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : List[str]
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "dict"
lowerCAmelCase : ClassVar[Any] = None
lowerCAmelCase : str = field(default="Translation" ,init=A_ ,repr=A_ )
def __call__( self : List[str] ) -> Any:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : Optional[List] = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "dict"
lowerCAmelCase : ClassVar[Any] = None
lowerCAmelCase : str = field(default="TranslationVariableLanguages" ,init=A_ ,repr=A_ )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = sorted(set(self.languages ) ) if self.languages else None
lowercase__ : Dict = len(self.languages ) if self.languages else None
def __call__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = set(self.languages )
if self.languages and set(_snake_case ) - lang_set:
raise ValueError(
f"""Some languages in example ({", ".join(sorted(set(_snake_case ) - lang_set ) )}) are not in valid set ({", ".join(_snake_case )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowercase__ : str = []
for lang, text in translation_dict.items():
if isinstance(_snake_case ,_snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowercase__ , lowercase__ : Optional[Any] = zip(*sorted(_snake_case ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase ( self : List[Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
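# --- Standalone sketch of the encode_example flattening above ---
# A language may map to one string or to several; pairs are flattened and then
# sorted by language code before being split back into parallel tuples.
_translation_dict = {"de": "das Haus", "en": ["the house", "the building"]}
_pairs = []
for _lang, _text in _translation_dict.items():
    if isinstance(_text, str):
        _pairs.append((_lang, _text))
    else:
        _pairs.extend((_lang, _el) for _el in _text)
_languages, _translations = zip(*sorted(_pairs))
assert _languages == ("de", "en", "en")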
| 16 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_A = logging.get_logger(__name__)
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , *A_ , **A_ ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
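# --- Generic sketch of the deprecation-shim pattern above, with hypothetical names ---
# The shim subclasses the replacement and only adds a FutureWarning, so old imports
# keep working while pointing users at the new class.
import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)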
| 62 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]:
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : int = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : str = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : int = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : List[Any] = 2
# Initialize accelerator
lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : str = config['''lr''']
lowercase__ : str = int(config['''num_epochs'''] )
lowercase__ : Optional[int] = int(config['''seed'''] )
lowercase__ : Tuple = int(config['''batch_size'''] )
lowercase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
    # We can now define an inner training loop function. It should take a batch size as its only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[Any] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : Dict = model(**__lowerCamelCase )
lowercase__ : List[Any] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Any = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
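# --- Hedged sketch of the idea behind find_executable_batch_size ---
# Retry the wrapped function with a halved batch size whenever it raises an
# out-of-memory error. This is a simplified illustration of the decorator used
# above, not accelerate's actual implementation.
import functools

def _shrink_on_oom(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:  # accelerate inspects real CUDA OOM errors
                    if "out of memory" not in str(err):
                        raise
                    batch_size //= 2
            raise RuntimeError("No executable batch size found.")
        return wrapper
    return decorator

@_shrink_on_oom(starting_batch_size=64)
def _toy_loop(batch_size):
    if batch_size > 16:  # pretend anything above 16 exhausts memory
        raise RuntimeError("CUDA out of memory")
    return batch_size

assert _toy_loop() == 16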
| 16 | 0 |
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any ) -> List[Any]:
"""simple docstring"""
print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def UpperCamelCase_ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase_ : str = [[float('inf' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
lowerCAmelCase_ : List[str] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCamelCase ):
# looping through rows of graph array
for i in range(__lowerCamelCase ):
# looping through columns of graph array
for j in range(__lowerCamelCase ):
if (
dist[i][k] != float('inf' )
and dist[k][j] != float('inf' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowerCAmelCase_ : str = dist[i][k] + dist[k][j]
_print_dist(__lowerCamelCase , __lowerCamelCase )
return dist, v
if __name__ == "__main__":
lowercase__ : Optional[int] = int(input("""Enter number of vertices: """))
lowercase__ : List[str] = int(input("""Enter number of edges: """))
lowercase__ : Optional[int] = [[float("""inf""") for i in range(v)] for j in range(v)]
for i in range(v):
lowercase__ : Optional[Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("""\nEdge """, i + 1)
lowercase__ : List[Any] = int(input("""Enter source:"""))
lowercase__ : List[Any] = int(input("""Enter destination:"""))
lowercase__ : int = float(input("""Enter weight:"""))
lowercase__ : Tuple = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
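# --- Self-contained sketch of the relaxation above on a tiny 3-vertex graph ---
# INF marks a missing edge; after the triple loop, dist[0][2] is reachable via vertex 1.
_INF = float("inf")
_dist = [
    [0.0, 1.0, _INF],
    [_INF, 0.0, 2.0],
    [_INF, _INF, 0.0],
]
for _k in range(3):
    for _i in range(3):
        for _j in range(3):
            if _dist[_i][_k] + _dist[_k][_j] < _dist[_i][_j]:
                _dist[_i][_j] = _dist[_i][_k] + _dist[_k][_j]
assert _dist[0][2] == 3.0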
| 224 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : Optional[int] = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : str = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
    token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token'''))
    return token
def final():
    head = []
    head.append(('''layernorm.weight''', '''norm.weight'''))
    head.append(('''layernorm.bias''', '''norm.bias'''))
    head.append(('''classifier.weight''', '''head.weight'''))
    head.append(('''classifier.bias''', '''head.bias'''))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='''dataset''')), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''')
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('''cpu'''))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
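# Illustrative invocation (added; the script name and local paths are placeholders):
#     python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#         --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#         --pytorch_dump_folder_path ./cvt-w24-384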
| 16 | 0 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowercase__ : Union[str, Any] = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f'Saving model to {output_model_file}')
                torch.save(state_dict, output_model_file)
                logger.info(f'Model saved to {output_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f'Saving model to {output_model_file}')
            torch.save(state_dict, output_model_file)
            logger.info(f'Model saved to {output_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f'{MODEL_NAME}_{model_index}')
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f'Saving model to {ckpt_dir}')
            state_dict = {'''model''': state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f'Model saved to {ckpt_dir}')
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
                        'initializing FSDP object' )
                return
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f'Loading model from {input_model_file}')
            state_dict = torch.load(input_model_file)
            logger.info(f'Model loaded from {input_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f'Loading model from {input_model_file}')
            state_dict = torch.load(input_model_file)
            logger.info(f'Model loaded from {input_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f'{MODEL_NAME}_{model_index}')
                if f'{MODEL_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading model from {ckpt_dir}')
            state_dict = {'''model''': model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(), )
            state_dict = state_dict['''model''']
            logger.info(f'Model loaded from {ckpt_dir}')
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f'Saving Optimizer state to {output_optimizer_file}')
                torch.save(optim_state, output_optimizer_file)
                logger.info(f'Optimizer state saved in {output_optimizer_file}')
        else:
            ckpt_dir = os.path.join(output_dir, f'{OPTIMIZER_NAME}_{optimizer_index}')
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f'Saving Optimizer state to {ckpt_dir}')
            dist_cp.save_state_dict(
                state_dict={'optimizer': optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f'Optimizer state saved in {ckpt_dir}')
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f'Loading Optimizer state from {input_optimizer_file}')
            optim_state = torch.load(input_optimizer_file)
            logger.info(f'Optimizer state loaded from {input_optimizer_file}')
        else:
            ckpt_dir = (
                os.path.join(input_dir, f'{OPTIMIZER_NAME}_{optimizer_index}')
                if f'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading Optimizer from {ckpt_dir}')
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(), optimizer_key='optimizer', storage_reader=dist_cp.FileSystemReader(ckpt_dir), )
            optim_state = optim_state['''optimizer''']
            logger.info(f'Optimizer loaded from {ckpt_dir}')
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
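# Hedged usage sketch (added for illustration; `accelerator`, `fsdp_plugin`, `model`,
# `optimizer`, and `ckpt_dir` are assumed to come from an `accelerate` FSDP training
# setup and are not defined in this module):
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)
#     # ... later, to resume training:
#     load_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)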
"""simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''')
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')
    out = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__lowerCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
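    # Illustrative example (added; not part of the original file):
    #     fizz_buzz(1, 15)
    # returns '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '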
| 16 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
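# Illustrative note (added): assuming this file is the package `__init__`, the lazy
# module installed above defers the heavy imports, e.g.
#     from transformers.models.roc_bert import RoCBertConfig
# only loads `configuration_roc_bert` on first attribute access.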
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 16 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ctrl": 2_5_6,
}
CONTROL_CODES = {
"Pregnancy": 1_6_8_6_2_9,
"Christianity": 7_6_7_5,
"Explain": 1_0_6_4_2_3,
"Fitness": 6_3_4_4_0,
"Saving": 6_3_1_6_3,
"Ask": 2_7_1_7_1,
"Ass": 9_5_9_8_5,
"Joke": 1_6_3_5_0_9,
"Questions": 4_5_6_2_2,
"Thoughts": 4_9_6_0_5,
"Retail": 5_2_3_4_2,
"Feminism": 1_6_4_3_3_8,
"Writing": 1_1_9_9_2,
"Atheism": 1_9_2_2_6_3,
"Netflix": 4_8_6_1_6,
"Computing": 3_9_6_3_9,
"Opinion": 4_3_2_1_3,
"Alone": 4_4_9_6_7,
"Funny": 5_8_9_1_7,
"Gaming": 4_0_3_5_8,
"Human": 4_0_8_8,
"India": 1_3_3_1,
"Joker": 7_7_1_3_8,
"Diet": 3_6_2_0_6,
"Legal": 1_1_8_5_9,
"Norman": 4_9_3_9,
"Tip": 7_2_6_8_9,
"Weight": 5_2_3_4_3,
"Movies": 4_6_2_7_3,
"Running": 2_3_4_2_5,
"Science": 2_0_9_0,
"Horror": 3_7_7_9_3,
"Confession": 6_0_5_7_2,
"Finance": 1_2_2_5_0,
"Politics": 1_6_3_6_0,
"Scary": 1_9_1_9_8_5,
"Support": 1_2_6_5_4,
"Technologies": 3_2_5_1_6,
"Teenage": 6_6_1_6_0,
"Event": 3_2_7_6_9,
"Learned": 6_7_4_6_0,
"Notion": 1_8_2_7_7_0,
"Wikipedia": 3_7_5_8_3,
"Books": 6_6_6_5,
"Extract": 7_6_0_5_0,
"Confessions": 1_0_2_7_0_1,
"Conspiracy": 7_5_9_3_2,
"Links": 6_3_6_7_4,
"Narcissus": 1_5_0_4_2_5,
"Relationship": 5_4_7_6_6,
"Relationships": 1_3_4_7_9_6,
"Reviews": 4_1_6_7_1,
"News": 4_2_5_6,
"Translation": 2_6_8_2_0,
"multilingual": 1_2_8_4_0_6,
}
def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (a sequence of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
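# Illustrative check (added; not in the original file):
#     get_pairs("lower") == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}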
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
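# Hedged usage sketch (added for illustration; assumes the "ctrl" vocab files in
# PRETRAINED_VOCAB_FILES_MAP above are downloadable):
#     tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#     tokens = tokenizer._tokenize("Links Hello world")
#     ids = tokenizer.convert_tokens_to_ids(tokens)
#     text = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids))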
| 336 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace('''model.''', '''''')
    if "norm1" in orig_key:
        orig_key = orig_key.replace('''norm1''', '''attention.output.LayerNorm''')
    if "norm2" in orig_key:
        orig_key = orig_key.replace('''norm2''', '''output.LayerNorm''')
    if "norm" in orig_key:
        orig_key = orig_key.replace('''norm''', '''LayerNorm''')
    if "transformer" in orig_key:
        layer_num = orig_key.split('''.''')[0].split('''_''')[-1]
        orig_key = orig_key.replace(f"""transformer_{layer_num}""", f"""encoder.layer.{layer_num}""")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('''mha.attn''', '''attention.self''')
    if "mha" in orig_key:
        orig_key = orig_key.replace('''mha''', '''attention''')
    if "W_q" in orig_key:
        orig_key = orig_key.replace('''W_q''', '''self.query''')
    if "W_k" in orig_key:
        orig_key = orig_key.replace('''W_k''', '''self.key''')
    if "W_v" in orig_key:
        orig_key = orig_key.replace('''W_v''', '''self.value''')
    if "ff1" in orig_key:
        orig_key = orig_key.replace('''ff1''', '''intermediate.dense''')
    if "ff2" in orig_key:
        orig_key = orig_key.replace('''ff2''', '''output.dense''')
    if "ff" in orig_key:
        orig_key = orig_key.replace('''ff''', '''output.dense''')
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('''mlm.mlm_class''', '''cls.predictions.decoder''')
    if "mlm" in orig_key:
        orig_key = orig_key.replace('''mlm''', '''cls.predictions.transform''')
    if "cls" not in orig_key:
        orig_key = '''yoso.''' + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict['''cls.predictions.bias'''] = orig_state_dict['''cls.predictions.decoder.bias''']
    orig_state_dict['''yoso.embeddings.position_ids'''] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model_state_dict''']
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""")
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
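    # Illustrative invocation (added; the script name and paths are placeholders):
    #     python convert_yoso_checkpoint.py --pytorch_model_path yoso.ckpt \
    #         --config_file yoso_config.json --pytorch_dump_path ./yoso-hf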
| 16 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
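# Hedged usage sketch (added for illustration; the checkpoint name is an assumption):
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")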
| 217 |
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + '''/p022_names.txt''') as file:
        names = str(file.readlines()[0])
        names = names.replace('''"''', '''''').split(''',''')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 16 | 0 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f'''{solution() = }''')
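# Quick check (added): for n = 10 the sum of squares is 385 and the square of the
# sum is 3025, so solution(10) == 2640.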
| 27 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, '''vision''')
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('''candidate_labels''')
        text_inputs = model_inputs.pop('''text_inputs''')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('''candidate_labels''')
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
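# Hedged usage sketch (added for illustration; the checkpoint name is an assumption):
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.png", candidate_labels=["cat", "dog"])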
| 16 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        self.jieba = jieba
        self.translator = str.maketrans(""" \n""", """\u2582\u2583""")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("""``""", """\"""").replace("""''""", """\"""")
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""", outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(""",""") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, """"""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE, """ """).strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(""" """, """""").replace("""\u2582""", """ """).replace("""\u2583""", """\n""")
        return text
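# Hedged usage sketch (added for illustration; requires `jieba` and the checkpoint above):
#     tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tokenizer.encode("你好世界")
#     tokenizer.decode(ids)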
| 70 |
"""simple docstring"""
def _print_dist(dist, v):
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('''inf'''):
                print(int(dist[i][j]), end='''\t''')
            else:
                print('''INF''', end='''\t''')
        print()
def floyd_warshall(graph, v):
    dist = [[float('''inf''') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('''inf''')
                    and dist[k][j] != float('''inf''')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
lowerCAmelCase_ = int(input('Enter number of vertices: '))
lowerCAmelCase_ = int(input('Enter number of edges: '))
lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase_ = int(input('Enter source:'))
lowerCAmelCase_ = int(input('Enter destination:'))
lowerCAmelCase_ = float(input('Enter weight:'))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
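# Illustrative non-interactive example (added): a 3-vertex graph with edges
# 0 -> 1 (weight 2) and 1 -> 2 (weight 1) yields dist[0][2] == 3:
#     g = [[0.0, 2.0, float("inf")], [float("inf"), 0.0, 1.0], [float("inf"), float("inf"), 0.0]]
#     dist, _ = floyd_warshall(g, 3)
#     assert dist[0][2] == 3.0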
| 16 | 0 |
import os
def solution():
    """Find the greatest product of four adjacent numbers (in any direction) in the 20x20 grid."""
    with open(os.path.dirname(__file__) + '''/grid.txt''') as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
        maximum = 0
        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp
        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp
        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp
        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution())
| 303 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
| 16 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''lower newer'''
        bpe_tokens = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase__ : List[str] =self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCamelCase__ : Dict =self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
lowerCamelCase__ : Union[str, Any] ='''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
lowerCamelCase__ : str =tokenizer_s.tokenize(_snake_case )
lowerCamelCase__ : Optional[Any] =tokenizer_r.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCamelCase__ : Optional[Any] ='''xa\u0303y''' + ''' ''' + '''x\xe3y'''
lowerCamelCase__ : int =tokenizer_s.tokenize(_snake_case )
lowerCamelCase__ : Union[str, Any] =tokenizer_r.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
# Test that the tokenization is identical on unicode of space type
lowerCamelCase__ : Optional[int] =[
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_old_tokenizer_raises(self):
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
"""simple docstring"""
pass | 126 |
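# --- Editor's sketch (not part of the test file above): how the tiny vocab/merges
# fixture drives CLIP's BPE. CLIPTokenizer is the real transformers class; the
# temp-file setup and reduced vocabulary here are assumptions for illustration.
import json as _json
import os as _os
import tempfile as _tempfile

from transformers import CLIPTokenizer as _CLIPTokenizer

with _tempfile.TemporaryDirectory() as _tmp:
    _vocab = ["l", "o", "w", "e", "r", "n", "lo", "er</w>", "low</w>", "newer</w>", "<unk>", "<|startoftext|>", "<|endoftext|>"]
    _merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
    _vocab_file = _os.path.join(_tmp, "vocab.json")
    _merges_file = _os.path.join(_tmp, "merges.txt")
    with open(_vocab_file, "w", encoding="utf-8") as _fp:
        _fp.write(_json.dumps({tok: i for i, tok in enumerate(_vocab)}))
    with open(_merges_file, "w", encoding="utf-8") as _fp:
        _fp.write("\n".join(_merges))
    _tok = _CLIPTokenizer(_vocab_file, _merges_file, unk_token="<unk>")
    # "l o" then "e r</w>" merges fire; "lo w</w>" cannot, since "w" is not word-final
    print(_tok.tokenize("lower newer"))  # expect: ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']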
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
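# Editor's sketch of the same lazy-import idea with plain PEP 562, independent of
# transformers internals. This fragment belongs in a package __init__.py; the
# package and symbol names are illustrative, not part of the file above.
#
# import importlib
#
# _import_structure = {"configuration_xglm": ["XGLMConfig"]}
#
# def __getattr__(name):
#     for module, symbols in _import_structure.items():
#         if name in symbols:
#             return getattr(importlib.import_module(f".{module}", __name__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")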
| 16 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
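# Editor's usage sketch (assumes transformers is installed): the per-modality layer
# counts feed the num_hidden_layers dict built above. Values here are examples.
#
# from transformers import LxmertConfig
#
# config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
# print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}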
| 59 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
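# Editor's sketch of the cross-framework loading these tests exercise: a TF model
# can load a PyTorch checkpoint with from_pt=True, and vice versa with from_tf=True.
# These are real transformers APIs; the checkpoint name is an example.
#
# from transformers import AutoModel, TFAutoModel
#
# tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
# pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)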
| 16 | 0 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
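# Editor's sketch of the hooks the trainer above expects. The signatures are
# assumptions inferred from how the hooks are called, not a published interface:
# the post-processor maps raw generations back to answer strings, and
# compute_metrics receives the resulting EvalPrediction-like object.
def post_processing_function(examples, features, outputs, stage="eval"):
    # decode generated token ids into answer strings and align them with example ids
    ...


def compute_metrics(eval_pred):
    # e.g. run the squad metric over predicted vs. gold answers
    ...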
| 17 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    """Weighted undirected graph with a Boruvka minimum-spanning-tree routine."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Doctest placeholder kept from the original module."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
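# Editor's usage sketch for the Graph class above (the edge list is an example):
g = Graph(4)
for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 1), (0, 3, 3)]:
    g.add_edge(u, v, w)
g.boruvka()  # prints the chosen edges; the total MST weight here is 4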
| 17 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
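# Editor's worked example of the feature-size arithmetic above, under the default
# settings: input_size=1, lags_sequence of length 7, no time/static/dynamic
# features, and cardinality=[0] giving embedding_dimension=[min(50, 1 // 2)] = [0].
# Then _number_of_features = 0 + 0 + 0 + 0 + 2 (loc/scale) = 2, so
# feature_size = 1 * 7 + 2 = 9.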
| 17 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum all amicable numbers below `limit` (Project Euler 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
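# Editor's worked example: sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220,
# so both members of the classic amicable pair are counted; solution(10000) == 31626.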
| 17 | 1 |
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler 20)."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
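# Editor's worked check: factorial(10) == 3628800, whose digits sum to 27, so
# solution(10) == 27; the default solution(100) returns 648.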
| 17 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['_TRAINERSPEC']._serialized_start = 45
    _globals['_TRAINERSPEC']._serialized_end = 15_81
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 15_17
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 15_70
    _globals['_NORMALIZERSPEC']._serialized_start = 15_84
    _globals['_NORMALIZERSPEC']._serialized_end = 17_93
    _globals['_SELFTESTDATA']._serialized_start = 17_95
    _globals['_SELFTESTDATA']._serialized_end = 19_16
    _globals['_SELFTESTDATA_SAMPLE']._serialized_start = 18_64
    _globals['_SELFTESTDATA_SAMPLE']._serialized_end = 19_05
    _globals['_MODELPROTO']._serialized_start = 19_19
    _globals['_MODELPROTO']._serialized_end = 24_29
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 22_08
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 24_18
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 23_23
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 24_07
# @@protoc_insertion_point(module_scope)
| 17 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 17 |
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with the RFC 1924 base85 alphabet."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    """Decode RFC 1924 base85 bytes back to a UTF-8 string."""
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
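# Editor's note: base64.b85encode/b85decode use the RFC 1924 base85 alphabet, which
# differs from Ascii85 (base64.a85encode). A quick round-trip sanity check:
assert base85_decode(base85_encode("Hello World!")) == "Hello World!"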
| 17 | 1 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
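# Editor's sketch of the quantity being computed, independent of jiwer: CER is the
# character-level edit distance divided by the reference length, (S + D + I) / N.
def char_error_rate(reference: str, hypothesis: str) -> float:
    n, m = len(reference), len(hypothesis)
    dp = list(range(m + 1))  # one row of the Levenshtein DP table
    for i in range(1, n + 1):
        prev, dp[0] = dp[0], i
        for j in range(1, m + 1):
            cur = dp[j]
            dp[j] = min(dp[j] + 1, dp[j - 1] + 1, prev + (reference[i - 1] != hypothesis[j - 1]))
            prev = cur
    return dp[m] / n


assert char_error_rate("abcd", "abxd") == 0.25  # one substitution over 4 characters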
| 17 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort by repeatedly extracting the min and max of the remaining items."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
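# Editor's trace of one run: merge_sort([5, 1, 4, 2, 3]) peels (min, max) pairs,
# giving start=[1, 2] and end=[5, 4]; reversing end yields [1, 2, 3, 4, 5]. Note
# this is a min-max selection scheme, not the classic divide-and-conquer merge sort.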
| 17 | 1 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
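# Editor's sketch of the denoising loop the full-loop tests above replay, using the
# public diffusers API; the random "model" stands in for a trained UNet, and the
# 1x3x8x8 latent shape is an arbitrary example.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
sample = torch.randn(1, 3, 8, 8)
for t in reversed(range(scheduler.config.num_train_timesteps)):
    residual = torch.randn_like(sample)  # a trained UNet would predict the noise here
    sample = scheduler.step(residual, t, sample).prev_sample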
| 17 |
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    """Return the mean absolute deviation of a non-empty list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
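# Editor's worked example: for [1, 2, 3, 4] the mean is 2.5 and the absolute
# deviations are 1.5, 0.5, 0.5, 1.5, so the result is their mean, 1.0.
assert average_absolute_deviation([1, 2, 3, 4]) == 1.0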
| 17 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img() -> "Image.Image":
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
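# Editor's sketch of the same end-to-end inference via the high-level pipeline API
# (a real transformers API; the blank input image is a stand-in for a photo):
#
# from PIL import Image
# from transformers import pipeline
#
# classifier = pipeline("image-classification", model="google/mobilenet_v1_1.0_224")
# print(classifier(Image.new("RGB", (224, 224))))  # list of {"label", "score"} dicts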
| 17 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ):
__lowercase = parent
__lowercase = vocab_size
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = num_patches + 1
def _lowercase ( self : int ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size )
__lowercase = BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, )
return config, pixel_values, labels
def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ):
__lowercase = FlaxBeitModel(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ):
__lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ):
__lowercase = self.type_sequence_label_size
__lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _lowercase ( self : List[Any] ):
__lowercase = FlaxBeitModelTester(self )
        __lowercase = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(UpperCAmelCase__ )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1], UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ):
return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape, output.shape )
def _lowercase ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def _lowercase ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
            outputs = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
            self.assertIsNotNone(outputs )
def prepare_img( ) -> str:
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values
# prepare bool_masked_pos
        __lowercase = np.ones((1, 1_9_6), dtype=bool )
# forward pass
__lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) )
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_0_0_0)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_8_1
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
@slow
def _lowercase ( self : List[str] ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 2_1_8_4_1)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
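# --- Editor's illustrative sketch (not part of the original file) ---
# The JIT test above follows a generic JAX pattern: call the same jitted
# function with and without compilation and compare outputs. A minimal,
# self-contained version of that pattern:
def _illustrative_jit_consistency_check():
    import jax
    import jax.numpy as jnp

    @jax.jit
    def fn(x):
        return jnp.tanh(x) * 2.0

    x = jnp.ones((4, 8))
    with jax.disable_jit():
        eager_out = fn(x)  # runs eagerly even though fn is decorated
    jit_out = fn(x)
    assert eager_out.shape == jit_out.shape
    assert jnp.allclose(eager_out, jit_out, atol=1e-6)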
| 17 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
    images : Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
    images : np.ndarray
    nsfw_content_detected : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
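# --- Editor's illustrative sketch (not part of the original file) ---
# Every try/except block above follows one pattern: probe the optional
# dependency, and fall back to a dummy object that raises a helpful error
# only when the user actually instantiates it. The shape of that pattern,
# with hypothetical module and class names:
#
# try:
#     if not (is_transformers_available() and is_torch_available()):
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     from ...utils.dummy_torch_and_transformers_objects import MyPipeline  # dummy stub
# else:
#     from .pipeline_my_pipeline import MyPipeline  # real implementation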
| 17 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowerCAmelCase ( unittest.TestCase ,lowercase ):
"""simple docstring"""
    def setUp( self ):
        self.tool = load_tool("text-classification" )
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True )
def _lowercase ( self : str ):
__lowercase = self.tool("That's quite cool", ["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : str ):
__lowercase = self.remote_tool("That's quite cool", ["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : List[str] ):
__lowercase = self.tool(text="That's quite cool", labels=["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : Tuple ):
__lowercase = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
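# --- Editor's illustrative note (not part of the original file) ---
# Outside the test harness, the same tool can be used directly; a sketch
# assuming `load_tool` as imported above:
#
# classifier = load_tool("text-classification")
# classifier.setup()
# classifier("That's quite cool", ["positive", "negative"])  # -> "positive"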
| 17 | 1 |
"""simple docstring"""
class EditDistance:
    """simple docstring"""
    def __init__( self ):
        self.worda = ""
        self.wordb = ""
        self.dp = []
    def __min_dist_top_down_dp( self, m : int, n : int ):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1, n )
                replace = self.__min_dist_top_down_dp(m - 1, n - 1 )
                self.dp[m][n] = 1 + min(insert, delete, replace )
            return self.dp[m][n]
    def min_dist_top_down( self, worda : str, wordb : str ):
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1, len(wordb ) - 1 )
    def min_dist_bottom_up( self, worda : str, wordb : str ):
        self.worda = worda
        self.wordb = wordb
        m = len(worda )
        n = len(wordb )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace )
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    Sa = input('Enter the first string: ').strip()
    Sb = input('Enter the second string: ').strip()
    print()
    print(F"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}")
    print(F"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}")
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
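# --- Editor's illustrative worked example (not part of the original file) ---
# For worda="kitten" and wordb="sitting" both methods return 3:
# substitute k->s, substitute e->i, and append g. Mechanically, the
# bottom-up table ends with dp[6][7] == 3, e.g.:
#
#     EditDistance().min_dist_bottom_up("kitten", "sitting")  # -> 3
#     EditDistance().min_dist_top_down("kitten", "sitting")   # -> 3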
| 17 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_a = 'CompVis/stable-diffusion-v1-1'
_a = 'CompVis/stable-diffusion-v1-2'
_a = 'CompVis/stable-diffusion-v1-3'
_a = 'CompVis/stable-diffusion-v1-4'
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCAmelCase__ : StableDiffusionSafetyChecker, UpperCAmelCase__ : CLIPImageProcessor, UpperCAmelCase__ : bool = True, ):
        super().__init__()
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, requires_safety_checker=UpperCAmelCase__, )
self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea )
@property
def _lowercase ( self : List[str] ):
return {k: getattr(self, UpperCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )}
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
self.enable_attention_slicing(UpperCAmelCase__ )
@torch.no_grad()
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Tuple, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Any, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Optional[int], ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ):
__lowercase = "cuda" if torch.cuda.is_available() else "cpu"
self.to(UpperCAmelCase__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.2
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.3
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.4
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
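# --- Editor's illustrative usage sketch (not part of the original file) ---
# One way this comparison pipeline is typically driven; the registered
# community-pipeline name is an assumption, as this dump mangles it:
#
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4",
#     custom_pipeline="stable_diffusion_comparison",  # assumed name
# ).to("cuda")
# out = pipe(prompt="an astronaut riding a horse", num_inference_steps=50)
# out.images  # one image per checkpoint: v1.1, v1.2, v1.3, v1.4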
| 17 | 1 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_base_case( self ):
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap, w, val, c ), 0 )
        val = [6_0]
        w = [1_0]
        c = len(val )
        self.assertEqual(k.knapsack(cap, w, val, c ), 0 )
    def test_easy_case( self ):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap, w, val, c ), 5 )
    def test_knapsack( self ):
        cap = 5_0
        val = [6_0, 1_0_0, 1_2_0]
        w = [1_0, 2_0, 3_0]
        c = len(val )
        self.assertEqual(k.knapsack(cap, w, val, c ), 2_2_0 )
if __name__ == "__main__":
unittest.main()
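# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal recursive 0/1 knapsack consistent with the expectations above,
# assuming the signature knapsack(capacity, weights, values, counter) that
# the tests call:
def _illustrative_knapsack(capacity, weights, values, counter):
    if counter == 0 or capacity == 0:
        return 0
    if weights[counter - 1] > capacity:  # item does not fit: skip it
        return _illustrative_knapsack(capacity, weights, values, counter - 1)
    return max(  # best of taking vs. skipping item counter-1
        values[counter - 1]
        + _illustrative_knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        _illustrative_knapsack(capacity, weights, values, counter - 1),
    )

assert _illustrative_knapsack(5_0, [1_0, 2_0, 3_0], [6_0, 1_0_0, 1_2_0], 3) == 2_2_0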
| 17 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = "ssube/stable-diffusion-x4-upscaler-onnx"
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[str]=0 ):
__lowercase = floats_tensor((1, 3, 1_2_8, 1_2_8), rng=random.Random(UpperCAmelCase__ ) )
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : Any ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _lowercase ( self : Optional[Any] ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : int ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : str ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : Any ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : Tuple ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase ( self : Dict ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def _lowercase ( self : Dict ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowercase = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A fantasy landscape, trending on artstation"
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=UpperCAmelCase__, output_type="np", )
__lowercase = output.images
__lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowercase ( self : str ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowercase = init_image.resize((1_2_8, 1_2_8) )
__lowercase = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" )
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", scheduler=UpperCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A fantasy landscape, trending on artstation"
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=2_0, generator=UpperCAmelCase__, output_type="np", )
__lowercase = output.images
__lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
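# --- Editor's illustrative usage sketch (not part of the original file) ---
# What the nightly tests above boil down to: 4x-upscale a 128x128 image.
# The checkpoint, provider, and prompt are taken from the tests themselves.
#
# pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#     "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
# )
# low_res = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
#     "/img2img/sketch-mountains-input.jpg"
# ).resize((1_2_8, 1_2_8))
# image = pipe(
#     prompt="A fantasy landscape, trending on artstation",
#     image=low_res, num_inference_steps=1_0,
# ).images[0]  # 512x512 PIL image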
| 17 | 1 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _lowerCAmelCase :
"""simple docstring"""
@property
def _lowercase ( self : Dict ):
return self.get_dummy_input()
@property
def _lowercase ( self : Optional[int] ):
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def _lowercase ( self : Dict, UpperCAmelCase__ : Tuple=True, UpperCAmelCase__ : str=False, UpperCAmelCase__ : Union[str, Any]=False, UpperCAmelCase__ : List[str]=False, ):
__lowercase = 4
__lowercase = 3_2
__lowercase = (3_2, 3_2)
__lowercase = torch.manual_seed(0 )
__lowercase = torch.device(UpperCAmelCase__ )
__lowercase = (batch_size, num_channels) + sizes
__lowercase = randn_tensor(UpperCAmelCase__, generator=UpperCAmelCase__, device=UpperCAmelCase__ )
__lowercase = {"hidden_states": hidden_states}
if include_temb:
__lowercase = 1_2_8
__lowercase = randn_tensor((batch_size, temb_channels), generator=UpperCAmelCase__, device=UpperCAmelCase__ )
if include_res_hidden_states_tuple:
__lowercase = torch.manual_seed(1 )
__lowercase = (randn_tensor(UpperCAmelCase__, generator=UpperCAmelCase__, device=UpperCAmelCase__ ),)
if include_encoder_hidden_states:
__lowercase = floats_tensor((batch_size, 3_2, 3_2) ).to(UpperCAmelCase__ )
if include_skip_sample:
__lowercase = randn_tensor(((batch_size, 3) + sizes), generator=UpperCAmelCase__, device=UpperCAmelCase__ )
return dummy_input
def _lowercase ( self : Tuple ):
__lowercase = {
"in_channels": 3_2,
"out_channels": 3_2,
"temb_channels": 1_2_8,
}
if self.block_type == "up":
__lowercase = 3_2
if self.block_type == "mid":
init_dict.pop("out_channels" )
__lowercase = self.dummy_input
return init_dict, inputs_dict
def _lowercase ( self : Any, UpperCAmelCase__ : Optional[int] ):
__lowercase ,__lowercase = self.prepare_init_args_and_inputs_for_common()
__lowercase = self.block_class(**UpperCAmelCase__ )
unet_block.to(UpperCAmelCase__ )
unet_block.eval()
with torch.no_grad():
__lowercase = unet_block(**UpperCAmelCase__ )
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = output[0]
self.assertEqual(output.shape, self.output_shape )
__lowercase = output[0, -1, -3:, -3:]
__lowercase = torch.tensor(UpperCAmelCase__ ).to(UpperCAmelCase__ )
assert torch_all_close(output_slice.flatten(), UpperCAmelCase__, atol=5E-3 )
@unittest.skipIf(torch_device == "mps", "Training is not supported in mps" )
def _lowercase ( self : Any ):
__lowercase ,__lowercase = self.prepare_init_args_and_inputs_for_common()
__lowercase = self.block_class(**UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
__lowercase = model(**UpperCAmelCase__ )
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = output[0]
__lowercase = torch.device(UpperCAmelCase__ )
__lowercase = randn_tensor(output.shape, device=UpperCAmelCase__ )
__lowercase = torch.nn.functional.mse_loss(UpperCAmelCase__, UpperCAmelCase__ )
loss.backward()
| 17 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_a = datasets.utils.logging.get_logger(__name__)
_a = ['names', 'prefix']
_a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_a = ['encoding_errors', 'on_bad_lines']
_a = ['date_format']
@dataclass
class _lowerCAmelCase ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : str = ","
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[Union[int, List[int], str]] = "infer"
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[Union[int, str, List[int], List[str]]] = None
__UpperCAmelCase : Optional[Union[List[int], List[str]]] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[Literal["c", "python", "pyarrow"]] = None
__UpperCAmelCase : Dict[Union[int, str], Callable[[Any], Any]] = None
__UpperCAmelCase : Optional[list] = None
__UpperCAmelCase : Optional[list] = None
__UpperCAmelCase : bool = False
__UpperCAmelCase : Optional[Union[int, List[int]]] = None
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[Union[str, List[str]]] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : bool = True
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : str = "."
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : str = '"'
__UpperCAmelCase : int = 0
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = True
__UpperCAmelCase : int = 0
__UpperCAmelCase : bool = True
__UpperCAmelCase : bool = False
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : int = 1_0_0_0_0
__UpperCAmelCase : Optional[datasets.Features] = None
__UpperCAmelCase : Optional[str] = "strict"
__UpperCAmelCase : Literal["error", "warn", "skip"] = "error"
__UpperCAmelCase : Optional[str] = None
def _lowercase ( self : Tuple ):
if self.delimiter is not None:
__lowercase = self.delimiter
if self.column_names is not None:
__lowercase = self.column_names
@property
def _lowercase ( self : Union[str, Any] ):
__lowercase = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _lowerCAmelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__UpperCAmelCase : Tuple = CsvConfig
def _lowercase ( self : List[str] ):
return datasets.DatasetInfo(features=self.config.features )
def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__lowercase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase__, (str, list, tuple) ):
__lowercase = data_files
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [files]
__lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files} )]
__lowercase = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [files]
__lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase__, gen_kwargs={"files": files} ) )
return splits
def _lowercase ( self : Dict, UpperCAmelCase__ : pa.Table ):
if self.config.features is not None:
__lowercase = self.config.features.arrow_schema
if all(not require_storage_cast(UpperCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
__lowercase = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=UpperCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__lowercase = table_cast(UpperCAmelCase__, UpperCAmelCase__ )
return pa_table
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : List[str] ):
__lowercase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__lowercase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ):
__lowercase = pd.read_csv(UpperCAmelCase__, iterator=UpperCAmelCase__, dtype=UpperCAmelCase__, **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(UpperCAmelCase__ ):
__lowercase = pa.Table.from_pandas(UpperCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase__ )}: {e}""" )
raise
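# --- Editor's illustrative usage sketch (not part of the original file) ---
# This builder is what backs `load_dataset("csv", ...)`; CsvConfig fields
# map straight onto pandas.read_csv keyword arguments. A sketch with a
# hypothetical local file:
#
# from datasets import load_dataset
# ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",", skiprows=0)
# ds["train"].features  # inferred (or `features`-forced) schema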
| 17 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
_a = '2020.9.26'
_a = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad( x : float, y : float, z : float, scale : float, distance : float) -> tuple[float, float]:
    '''simple docstring'''
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = F"""Input values must either be float or int: {list(locals().values())}"""
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate( x : float, y : float, z : float, axis : str, angle : float) -> tuple[float, float, float]:
    '''simple docstring'''
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            F"""{list(input_variables.values())}"""
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(F"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
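# --- Editor's illustrative note (not part of the original file) ---
# For comparison, the textbook rotation about the z-axis (rotate() above
# additionally rescales the angle via (angle % 360) / 450 * 180 / pi
# before taking sines and cosines):
#
#   x' = x*cos(theta) - y*sin(theta)
#   y' = y*cos(theta) + x*sin(theta)
#   z' = z
#
# e.g. rotating (1, 0, 0) by a true 90 degrees about z yields (0, 1, 0).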
| 17 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_a = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_a = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
_a = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def _lowercase ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"], )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=False ):
__lowercase = spearmanr(UpperCAmelCase__, UpperCAmelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
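# --- Editor's illustrative worked example (not part of the original file) ---
# Spearman is Pearson correlation applied to ranks. For the docstring
# example, references [1, 2, 3, 4, 5] and predictions [10, 9, 2.5, 6, 4],
# the prediction ranks are [5, 4, 1, 3, 2]; Pearson of [1..5] against
# [5, 4, 1, 3, 2] is -7/10 = -0.7, matching the documented output:
#
# from scipy.stats import spearmanr
# rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
# round(rho, 2)     # -0.7
# round(pvalue, 2)  # 0.19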
| 17 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
_a = logging.getLogger(__name__)
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> Optional[int]:
'''simple docstring'''
if metric == "rouge2":
__lowercase = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
__lowercase = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
__lowercase = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function.")
__lowercase = ModelCheckpoint(
dirpath=UpperCamelCase_, filename=UpperCamelCase_, monitor=F"""val_{metric}""", mode="max", save_top_k=3, every_n_epochs=1, )
return checkpoint_callback
def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : List[str]) -> int:
'''simple docstring'''
return EarlyStopping(
monitor=F"""val_{metric}""", mode="min" if "loss" in metric else "max", patience=UpperCamelCase_, verbose=UpperCamelCase_, )
class _lowerCAmelCase ( pl.Callback ):
"""simple docstring"""
def _lowercase ( self : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : str ):
        lrs = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
def _lowercase ( self : List[Any], UpperCAmelCase__ : pl.Trainer, UpperCAmelCase__ : pl.LightningModule, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[int]=True ):
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
__lowercase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
__lowercase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__lowercase = od / "test_results.txt"
__lowercase = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__lowercase = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
__lowercase = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=UpperCAmelCase__ )
generations_file.parent.mkdir(exist_ok=UpperCAmelCase__ )
with open(UpperCAmelCase__, "a+" ) as writer:
for key in sorted(UpperCAmelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
__lowercase = metrics[key]
if isinstance(UpperCAmelCase__, torch.Tensor ):
__lowercase = val.item()
__lowercase = F"""{key}: {val:.6f}\n"""
writer.write(UpperCAmelCase__ )
if not save_generations:
return
if "preds" in metrics:
__lowercase = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(UpperCAmelCase__ )
@rank_zero_only
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ):
try:
__lowercase = pl_module.model.model.num_parameters()
except AttributeError:
__lowercase = pl_module.model.num_parameters()
__lowercase = count_trainable_parameters(UpperCAmelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def _lowercase ( self : Any, UpperCAmelCase__ : pl.Trainer, UpperCAmelCase__ : pl.LightningModule ):
save_json(pl_module.metrics, pl_module.metrics_save_path )
return self._write_logs(UpperCAmelCase__, UpperCAmelCase__, "test" )
@rank_zero_only
def _lowercase ( self : int, UpperCAmelCase__ : pl.Trainer, UpperCAmelCase__ : Optional[Any] ):
save_json(pl_module.metrics, pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
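# --- Editor's illustrative usage sketch (not part of the original file) ---
# How the pieces above are typically wired together. The two factory
# functions are name-mangled to `_A` in this dump; descriptive stand-in
# names are used here, and `model`/`output_dir` are hypothetical:
#
# checkpoint_cb = get_checkpoint_callback(output_dir, metric="rouge2")      # stand-in name
# early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)  # stand-in name
# trainer = pl.Trainer(callbacks=[checkpoint_cb, early_stop_cb, Seq2SeqLoggingCallback()])
# trainer.fit(model)  # `model` is a hypothetical pl.LightningModule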
| 17 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly( poly : Sequence[float], x : float) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner( poly : Sequence[float], x : float) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
_a = (0.0, 0.0, 5.0, 9.3, 7.0)
_a = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
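# --- Editor's illustrative note (not part of the original file) ---
# Horner's rule rewrites c0 + c1*x + ... + cn*x**n as
#   ((...(cn*x + c_{n-1})*x + ...)*x + c1)*x + c0
# so evaluation needs only n multiplications, versus the O(n^2) work of
# computing each x**i from scratch. For the demo inputs above both
# functions print 79800.0, since 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 79800.0.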
| 17 | 1 |
"""simple docstring"""
from math import pow, sqrt
def validate( *values : float) -> bool:
    '''simple docstring'''
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a), 6)
if validate(UpperCamelCase_, UpperCamelCase_)
else ValueError("Input Error: Molar mass values must greater than 0.")
)
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float, UpperCamelCase_ : float) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a), 6)
if validate(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float, UpperCamelCase_ : float) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a), 6)
if validate(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float, UpperCamelCase_ : float) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a, 2), 6)
if validate(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float, UpperCamelCase_ : float) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a, 2) / molar_mass, 6)
if validate(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
| 17 |
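All of the helpers above are rearrangements of Graham's law, rate_1 / rate_2 = sqrt(M_2 / M_1): a lighter gas effuses faster in inverse proportion to the square root of its molar mass. A worked example with clean names (the gases are illustrative):

from math import sqrt

# Graham's law: rate_1 / rate_2 = sqrt(M_2 / M_1).
molar_mass_h2 = 2.016    # g/mol, hydrogen
molar_mass_o2 = 31.998   # g/mol, oxygen
ratio = sqrt(molar_mass_o2 / molar_mass_h2)
print(f"H2 effuses {ratio:.3f} times as fast as O2")  # ~3.984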
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _lowerCAmelCase ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Optional[Any], UpperCAmelCase__ : str ):
super().__init__()
__lowercase = model
__lowercase = 2
__lowercase = nn.Linear(self.model.config.hidden_size, self.num_labels )
def _lowercase ( self : Optional[int] ):
pass
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str, UpperCamelCase_ : str) -> str:
'''simple docstring'''
__lowercase = LongformerModel.from_pretrained(UpperCamelCase_)
__lowercase = LightningModel(UpperCamelCase_)
__lowercase = torch.load(UpperCamelCase_, map_location=torch.device("cpu"))
lightning_model.load_state_dict(ckpt["state_dict"])
# init longformer question answering model
__lowercase = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase_)
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCamelCase_)
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 17 | 1 |
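A hedged invocation sketch of the converter above; both paths are placeholders, and the model identifier follows the script's own help text:

# Equivalent to running the script with its three required CLI flags;
# the checkpoint and output paths below are hypothetical.
convert_longformer_qa_checkpoint_to_pytorch(
    "longformer-base-4096",          # --longformer_model
    "./lightning_model.ckpt",        # --longformer_question_answering_ckpt_path
    "./longformer_qa_pytorch_dump",  # --pytorch_dump_folder_path
)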
"""simple docstring"""
from collections.abc import Sequence
def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(UpperCamelCase_))
def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float:
'''simple docstring'''
__lowercase = 0.0
for coeff in reversed(UpperCamelCase_):
__lowercase = result * x + coeff
return result
if __name__ == "__main__":
_a = (0.0, 0.0, 5.0, 9.3, 7.0)
_a = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because it should only be run when releasing a minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=UpperCAmelCase__, )
assert hasattr(self, "env" )
def _lowercase ( self : str, UpperCAmelCase__ : List[Any] ):
# configuration for running training on smdistributed Model Parallel
__lowercase = {
"enabled": True,
"processes_per_host": 8,
}
__lowercase = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
__lowercase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
__lowercase = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""", instance_count=UpperCAmelCase__, instance_type=self.instance_type, debugger_hook_config=UpperCAmelCase__, hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 5_0_0,
}, metric_definitions=self.env.metric_definitions, distribution=UpperCAmelCase__, py_version="py36", )
def _lowercase ( self : Tuple, UpperCAmelCase__ : int ):
TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any] ):
# create estimator
__lowercase = self.create_estimator(UpperCAmelCase__ )
# run training
estimator.fit()
# result dataframe
__lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""", "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, UpperCAmelCase__ )
| 17 | 1 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : str ):
__lowercase = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
__lowercase = AutoTokenizer.from_pretrained("google/mt5-small" )
__lowercase = tokenizer("Hello there", return_tensors="np" ).input_ids
__lowercase = tokenizer("Hi I am", return_tensors="np" ).input_ids
__lowercase = shift_tokens_right(UpperCAmelCase__, model.config.pad_token_id, model.config.decoder_start_token_id )
__lowercase = model(UpperCAmelCase__, decoder_input_ids=UpperCAmelCase__ ).logits
__lowercase = optax.softmax_cross_entropy(UpperCAmelCase__, onehot(UpperCAmelCase__, logits.shape[-1] ) ).mean()
__lowercase = -(labels.shape[-1] * loss.item())
__lowercase = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 17 |
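The score check above boils down to optax's softmax cross-entropy against one-hot labels, averaged and rescaled by the sequence length. A minimal sketch of just that loss computation (shapes are synthetic; with uniform logits every token costs log(vocab_size) nats):

import jax.numpy as jnp
import optax
from flax.training.common_utils import onehot

logits = jnp.zeros((1, 3, 10))   # (batch, seq_len, vocab)
labels = jnp.array([[1, 4, 7]])  # (batch, seq_len) integer ids
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
assert jnp.allclose(loss, jnp.log(10.0))  # uniform logits -> log(vocab_size)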
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = "openai/whisper-base"
__UpperCAmelCase : Union[str, Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCAmelCase : List[str] = "transcriber"
__UpperCAmelCase : Optional[Any] = WhisperProcessor
__UpperCAmelCase : str = WhisperForConditionalGeneration
__UpperCAmelCase : List[str] = ["audio"]
__UpperCAmelCase : Tuple = ["text"]
def _lowercase ( self : str, UpperCAmelCase__ : int ):
return self.pre_processor(UpperCAmelCase__, return_tensors="pt" ).input_features
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ):
return self.model.generate(inputs=UpperCAmelCase__ )
def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int] ):
return self.pre_processor.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )[0]
| 17 | 1 |
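A hedged usage sketch of the transcription tool above. PipelineTool instances are callable and chain encode -> forward -> decode internally; the class name below is an assumption (the snippet's name is obfuscated), and the call pattern is a sketch, not a verified API:

import numpy as np

tool = SpeechToTextTool()  # assumed name for the class defined above
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
text = tool(audio)  # runs encode -> forward -> decode (sketch, downloads whisper-base)
print(text)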
"""simple docstring"""
def _A ( UpperCamelCase_ : int) -> bool:
'''simple docstring'''
return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a = int(input('Enter number: ').strip())
print(F"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 17 |
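The predicate sums proper divisors up to number // 2; a few sanity checks of that exact expression:

# 6 = 1+2+3 and 28 = 1+2+4+7+14 are perfect; 27 (1+3+9 = 13) is not.
for n, expected in [(6, True), (28, True), (27, False)]:
    assert (sum(i for i in range(1, n // 2 + 1) if n % i == 0) == n) is expected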
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]:
'''simple docstring'''
if isinstance(UpperCamelCase_, torch.Tensor):
return image
elif isinstance(UpperCamelCase_, PIL.Image.Image):
__lowercase = [image]
if isinstance(image[0], PIL.Image.Image):
__lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
__lowercase = np.concatenate(UpperCamelCase_, axis=0)
__lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0
__lowercase = image.transpose(0, 3, 1, 2)
__lowercase = 2.0 * image - 1.0
__lowercase = torch.from_numpy(UpperCamelCase_)
elif isinstance(image[0], torch.Tensor):
__lowercase = torch.cat(UpperCamelCase_, dim=0)
return image
def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int:
'''simple docstring'''
if not isinstance(UpperCamelCase_, np.ndarray):
__lowercase = True
__lowercase = va.device
__lowercase = va.cpu().numpy()
__lowercase = va.cpu().numpy()
__lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_)))
if np.abs(UpperCamelCase_) > DOT_THRESHOLD:
__lowercase = (1 - t) * va + t * va
else:
__lowercase = np.arccos(UpperCamelCase_)
__lowercase = np.sin(UpperCamelCase_)
__lowercase = theta_a * t
__lowercase = np.sin(UpperCamelCase_)
__lowercase = np.sin(theta_a - theta_t) / sin_theta_a
__lowercase = sin_theta_t / sin_theta_a
__lowercase = sa * va + sa * va
if inputs_are_torch:
__lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_)
return va
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int:
'''simple docstring'''
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]:
'''simple docstring'''
for param in model.parameters():
__lowercase = value
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ):
super().__init__()
self.register_modules(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, )
__lowercase = (
feature_extractor.size
if isinstance(feature_extractor.size, UpperCAmelCase__ )
else feature_extractor.size["shortest_edge"]
)
__lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std )
set_requires_grad(self.text_encoder, UpperCAmelCase__ )
set_requires_grad(self.clip_model, UpperCAmelCase__ )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : int ):
self.enable_attention_slicing(UpperCAmelCase__ )
def _lowercase ( self : str ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ):
# get the original timestep using init_timestep
__lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ )
__lowercase = max(num_inference_steps - init_timestep, 0 )
__lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ):
if not isinstance(UpperCAmelCase__, torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" )
__lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ )
]
__lowercase = torch.cat(UpperCAmelCase__, dim=0 )
else:
__lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 0.18_215 * init_latents
__lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 )
__lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
# get latents
__lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = init_latents
return latents
def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ):
__lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
__lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ):
__lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ )
__lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ):
__lowercase = latents.detach().requires_grad_()
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowercase = self.scheduler.alphas_cumprod[timestep]
__lowercase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowercase = torch.sqrt(UpperCAmelCase__ )
__lowercase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = self.scheduler.sigmas[index]
__lowercase = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * sample
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ )
__lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype )
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale
__lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0]
if isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = latents.detach() + grads * (sigma**2)
__lowercase = noise_pred_original
else:
__lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ):
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(UpperCAmelCase__, torch.Generator ) and batch_size > 1:
__lowercase = [generator] + [None] * (batch_size - 1)
__lowercase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__lowercase = [x[0] for x in coca_is_none if x[1]]
__lowercase = ", ".join(UpperCAmelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
if style_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
# get prompt text embeddings for content and style
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# duplicate text embeddings for each generation per prompt
__lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# set timesteps
__lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowercase = {}
if accepts_offset:
__lowercase = 1
self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device )
__lowercase = timesteps[:1].repeat(UpperCAmelCase__ )
# Preprocess image
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if clip_guidance_scale > 0:
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = slerp(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = content_text_input.input_ids.shape[-1]
__lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
# check if the scheduler accepts generator
__lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowercase = generator
with self.progress_bar(total=UpperCAmelCase__ ):
for i, t in enumerate(UpperCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowercase ,__lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowercase ,__lowercase = self.cond_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * latents
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
| 17 | 1 |
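The interpolation helper buried in the pipeline above is spherical linear interpolation (slerp) with a linear fallback when the two vectors are nearly colinear (|dot| above the 0.9995 threshold). A standalone NumPy sketch of the same logic with readable names:

import numpy as np

def slerp(t, v0, v1, dot_threshold=0.9995):
    # Spherical interpolation between v0 and v1; falls back to lerp
    # when the vectors are almost colinear to avoid dividing by ~0.
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:
        return (1 - t) * v0 + t * v1
    theta = np.arccos(dot)
    sin_theta = np.sin(theta)
    return (np.sin((1 - t) * theta) / sin_theta) * v0 + (np.sin(t * theta) / sin_theta) * v1

v0, v1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
print(slerp(0.5, v0, v1))  # ~[0.7071, 0.7071]: stays on the unit circle, unlike lerp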
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : Any, UpperCamelCase_ : Dict, UpperCamelCase_ : Optional[int], UpperCamelCase_ : List[Any]) -> str:
'''simple docstring'''
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCamelCase_, torch_dtype=torch.floataa)
# load LoRA weight from .safetensors
__lowercase = load_file(UpperCamelCase_)
__lowercase = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
__lowercase = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
__lowercase = pipeline.text_encoder
else:
__lowercase = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
__lowercase = pipeline.unet
# find the target layer
__lowercase = layer_infos.pop(0)
while len(UpperCamelCase_) > -1:
try:
__lowercase = curr_layer.__getattr__(UpperCamelCase_)
if len(UpperCamelCase_) > 0:
__lowercase = layer_infos.pop(0)
elif len(UpperCamelCase_) == 0:
break
except Exception:
if len(UpperCamelCase_) > 0:
temp_name += "_" + layer_infos.pop(0)
else:
__lowercase = layer_infos.pop(0)
__lowercase = []
if "lora_down" in key:
pair_keys.append(key.replace("lora_down", "lora_up"))
pair_keys.append(UpperCamelCase_)
else:
pair_keys.append(UpperCamelCase_)
pair_keys.append(key.replace("lora_up", "lora_down"))
# update weight
if len(state_dict[pair_keys[0]].shape) == 4:
__lowercase = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.floataa)
__lowercase = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(UpperCamelCase_, UpperCamelCase_).unsqueeze(2).unsqueeze(3)
else:
__lowercase = state_dict[pair_keys[0]].to(torch.floataa)
__lowercase = state_dict[pair_keys[1]].to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(UpperCamelCase_, UpperCamelCase_)
# update visited list
for item in pair_keys:
visited.append(UpperCamelCase_)
return pipeline
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
_a = parser.parse_args()
_a = args.base_model_path
_a = args.checkpoint_path
_a = args.dump_path
_a = args.lora_prefix_unet
_a = args.lora_prefix_text_encoder
_a = args.alpha
_a = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_a = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 17 |
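The core update in the converter above folds each low-rank pair back into the frozen base weight, W <- W + alpha * (up @ down), with an extra squeeze/unsqueeze step for 4-D conv kernels. A self-contained sketch for one linear layer, all tensors synthetic:

import torch

out_dim, in_dim, rank, alpha = 8, 16, 4, 0.75
layer = torch.nn.Linear(in_dim, out_dim, bias=False)
lora_up = torch.randn(out_dim, rank)   # plays the role of the "lora_up" weight
lora_down = torch.randn(rank, in_dim)  # plays the role of the "lora_down" weight
with torch.no_grad():
    layer.weight += alpha * lora_up @ lora_down  # W <- W + alpha * B @ A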
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _lowerCAmelCase :
"""simple docstring"""
__UpperCAmelCase : Tuple = XGLMConfig
__UpperCAmelCase : Optional[Any] = {}
__UpperCAmelCase : Union[str, Any] = "gelu"
def __init__( self : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=1_4, UpperCAmelCase__ : str=7, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[Any]=True, UpperCAmelCase__ : int=True, UpperCAmelCase__ : List[str]=9_9, UpperCAmelCase__ : Union[str, Any]=3_2, UpperCAmelCase__ : Union[str, Any]=2, UpperCAmelCase__ : Union[str, Any]=4, UpperCAmelCase__ : Tuple=3_7, UpperCAmelCase__ : List[Any]="gelu", UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Tuple=5_1_2, UpperCAmelCase__ : Optional[Any]=0.02, ):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = d_model
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = ffn_dim
__lowercase = activation_function
__lowercase = activation_dropout
__lowercase = attention_dropout
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = None
__lowercase = 0
__lowercase = 2
__lowercase = 1
def _lowercase ( self : Union[str, Any] ):
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def _lowercase ( self : Tuple ):
__lowercase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = self.get_config()
__lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowercase ( self : List[Any] ):
return XGLMConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=UpperCAmelCase__, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=UpperCAmelCase__, )
def _lowercase ( self : Dict ):
__lowercase = self.prepare_config_and_inputs()
__lowercase ,__lowercase ,__lowercase ,__lowercase = config_and_inputs
__lowercase = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCAmelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCAmelCase : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def _lowercase ( self : Optional[Any] ):
__lowercase = TFXGLMModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, n_embd=3_7 )
def _lowercase ( self : Any ):
self.config_tester.run_common_tests()
@slow
def _lowercase ( self : List[str] ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFXGLMModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def _lowercase ( self : int ):
super().test_resize_token_embeddings()
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int]=True ):
__lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
__lowercase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]], dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__lowercase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
__lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(), UpperCAmelCase__ )
@slow
def _lowercase ( self : List[Any] ):
__lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
__lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
__lowercase = tokenizer("Today is a nice day and", return_tensors="tf" )
__lowercase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(":/CPU:0" ):
__lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, seed=[7, 0] )
__lowercase = tokenizer.decode(output_ids[0], skip_special_tokens=UpperCAmelCase__ )
__lowercase = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
@slow
def _lowercase ( self : Dict ):
__lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
__lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
__lowercase = "left"
# use different length sentences to test batching
__lowercase = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
__lowercase = tokenizer(UpperCAmelCase__, return_tensors="tf", padding=UpperCAmelCase__ )
__lowercase = inputs["input_ids"]
__lowercase = model.generate(input_ids=UpperCAmelCase__, attention_mask=inputs["attention_mask"], max_new_tokens=1_2 )
__lowercase = tokenizer(sentences[0], return_tensors="tf" ).input_ids
__lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 )
__lowercase = tokenizer(sentences[1], return_tensors="tf" ).input_ids
__lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 )
__lowercase = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )
__lowercase = tokenizer.decode(output_non_padded[0], skip_special_tokens=UpperCAmelCase__ )
__lowercase = tokenizer.decode(output_padded[0], skip_special_tokens=UpperCAmelCase__ )
__lowercase = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, [non_padded_sentence, padded_sentence] )
| 17 | 1 |
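The batched-generation test hinges on padding_side = "left": decoder-only models append new tokens on the right, so shorter prompts must be padded on the left for their last real token to sit at the end of the sequence. A hedged sketch of just the tokenization step (this downloads the tokenizer):

from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
tok.padding_side = "left"
batch = tok(["a noticeably longer prompt than the next one", "Hello"],
            return_tensors="tf", padding=True)
print(batch["attention_mask"])  # zeros appear on the left of the short row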
"""simple docstring"""
from pathlib import Path
import fire
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str, UpperCamelCase_ : int) -> List[Any]:
'''simple docstring'''
__lowercase = Path(UpperCamelCase_)
__lowercase = Path(UpperCamelCase_)
dest_dir.mkdir(exist_ok=UpperCamelCase_)
for path in src_dir.iterdir():
__lowercase = [x.rstrip() for x in list(path.open().readlines())][:n]
__lowercase = dest_dir.joinpath(path.name)
print(UpperCamelCase_)
dest_path.open("w").write("\n".join(UpperCamelCase_))
if __name__ == "__main__":
fire.Fire(minify)
| 17 |
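Fire exposes the function's positional parameters on the command line, so the script is equivalent to calling the function directly; the directory names below are placeholders:

# Same effect as `python minify.py ./full_data ./tiny_data 100`:
# copy the first 100 lines of every file in src_dir into dest_dir.
minify("./full_data", "./tiny_data", 100)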
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_a = '__DUMMY_TRANSFORMERS_USER__'
_a = 'Dummy User'
_a = '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'
_a = 'https://hub-ci.huggingface.co'
_a = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
_a = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
_a = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def _A ( UpperCamelCase_ : List[Any]) -> Tuple:
'''simple docstring'''
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : int) -> List[Any]:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_ENDPOINT", UpperCamelCase_)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : str) -> Dict:
'''simple docstring'''
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : List[Any]) -> List[str]:
'''simple docstring'''
HfFolder.save_token(UpperCamelCase_)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def _A ( ) -> List[Any]:
'''simple docstring'''
return HfApi(endpoint=UpperCamelCase_)
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi) -> List[Any]:
'''simple docstring'''
__lowercase = HfFolder.get_token()
HfFolder.save_token(UpperCamelCase_)
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : Dict) -> int:
'''simple docstring'''
def _cleanup_repo(UpperCamelCase_ : Optional[int]):
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def _A ( UpperCamelCase_ : str) -> Any:
'''simple docstring'''
@contextmanager
def _temporary_repo(UpperCamelCase_ : Any):
try:
yield repo_id
finally:
cleanup_repo(UpperCamelCase_)
return _temporary_repo
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int:
'''simple docstring'''
__lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 17 | 1 |
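A hedged example of how a test would consume the fixtures above. The requestable fixture names are inferred from the wrapped `_`-suffixed values (the defining function names are obfuscated in the snippet), so treat every name below as an assumption:

# Hypothetical test: pytest injects the repo id of the pre-uploaded private
# text dataset; the fixture names hf_private_dataset_repo_txt_data, hf_api,
# and hf_token are assumed, not verified.
def test_private_text_repo_exists(hf_private_dataset_repo_txt_data, hf_api, hf_token):
    info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    assert info.id == hf_private_dataset_repo_txt_data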
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_a = True
except ImportError:
_a = False
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
def _A ( UpperCamelCase_ : Namespace) -> List[Any]:
'''simple docstring'''
return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
@staticmethod
def _lowercase ( UpperCAmelCase__ : ArgumentParser ):
__lowercase = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file", type=UpperCAmelCase__, help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path", type=UpperCAmelCase__, help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=UpperCAmelCase__ )
def __init__( self : Tuple, UpperCAmelCase__ : bool, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[Any]=None, *UpperCAmelCase__ : Union[str, Any] ):
__lowercase = testing
__lowercase = testing_file
__lowercase = path
def _lowercase ( self : Any ):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
__lowercase = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:2_2]]
if len(UpperCAmelCase__ ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
__lowercase = (
Path(UpperCAmelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
__lowercase = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCAmelCase__ ) )
else:
with open(self._testing_file, "r" ) as configuration_file:
__lowercase = json.load(UpperCAmelCase__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ), no_input=UpperCAmelCase__, extra_context=UpperCAmelCase__, )
__lowercase = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:2_2]][0]
# Retrieve configuration
with open(directory + "/configuration.json", "r" ) as configuration_file:
__lowercase = json.load(UpperCAmelCase__ )
__lowercase = configuration["lowercase_modelname"]
__lowercase = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(F"""{directory}/configuration.json""" )
__lowercase = "PyTorch" in generate_tensorflow_pytorch_and_flax
__lowercase = "TensorFlow" in generate_tensorflow_pytorch_and_flax
__lowercase = "Flax" in generate_tensorflow_pytorch_and_flax
__lowercase = F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__ )
os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""", exist_ok=UpperCAmelCase__ )
# Tests require submodules as they have parent imports
with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""", "w" ):
pass
shutil.move(
F"""{directory}/__init__.py""", F"""{model_dir}/__init__.py""", )
shutil.move(
F"""{directory}/configuration_{lowercase_model_name}.py""", F"""{model_dir}/configuration_{lowercase_model_name}.py""", )
def remove_copy_lines(UpperCAmelCase__ : int ):
with open(UpperCAmelCase__, "r" ) as f:
__lowercase = f.readlines()
with open(UpperCAmelCase__, "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCAmelCase__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_{lowercase_model_name}.py""", F"""{model_dir}/modeling_{lowercase_model_name}.py""", )
shutil.move(
F"""{directory}/test_modeling_{lowercase_model_name}.py""", F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""", )
else:
os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_tf_{lowercase_model_name}.py""", F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""", )
shutil.move(
F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""", F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""", )
else:
os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_flax_{lowercase_model_name}.py""", F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""", )
shutil.move(
F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""", F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""", )
else:
os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/{lowercase_model_name}.md""", F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""", )
shutil.move(
F"""{directory}/tokenization_{lowercase_model_name}.py""", F"""{model_dir}/tokenization_{lowercase_model_name}.py""", )
shutil.move(
F"""{directory}/tokenization_fast_{lowercase_model_name}.py""", F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""", )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCAmelCase__ : str, UpperCAmelCase__ : str, UpperCAmelCase__ : List[str] ):
# Create temp file
__lowercase ,__lowercase = mkstemp()
__lowercase = False
with fdopen(UpperCAmelCase__, "w" ) as new_file:
with open(UpperCAmelCase__ ) as old_file:
for line in old_file:
new_file.write(UpperCAmelCase__ )
if line_to_copy_below in line:
__lowercase = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCAmelCase__ )
if not line_found:
raise ValueError(F"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(UpperCAmelCase__, UpperCAmelCase__ )
# Remove original file
remove(UpperCAmelCase__ )
# Move new file
move(UpperCAmelCase__, UpperCAmelCase__ )
def skip_units(UpperCAmelCase__ : Any ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCAmelCase__ : Optional[Any] ):
with open(UpperCAmelCase__ ) as datafile:
__lowercase = []
__lowercase = False
__lowercase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
__lowercase = line.split("\"" )[1]
__lowercase = skip_units(UpperCAmelCase__ )
elif "# Below: " in line and "##" not in line:
__lowercase = line.split("\"" )[1]
__lowercase = skip_units(UpperCAmelCase__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = []
elif "# Replace with" in line and "##" not in line:
__lowercase = []
elif "##" not in line:
lines_to_copy.append(UpperCAmelCase__ )
remove(UpperCAmelCase__ )
replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(UpperCAmelCase__ )
| 17 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : int = "time_series_transformer"
__UpperCAmelCase : Any = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : int, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : str = "student_t", UpperCAmelCase__ : str = "nll", UpperCAmelCase__ : int = 1, UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7], UpperCAmelCase__ : Optional[Union[str, bool]] = "mean", UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : str = "gelu", UpperCAmelCase__ : int = 6_4, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : int = 1_0_0, UpperCAmelCase__ : float = 0.02, UpperCAmelCase__ : Any=True, **UpperCAmelCase__ : List[str], ):
# time series specific configuration
__lowercase = prediction_length
__lowercase = context_length or prediction_length
__lowercase = distribution_output
__lowercase = loss
__lowercase = input_size
__lowercase = num_time_features
__lowercase = lags_sequence
__lowercase = scaling
__lowercase = num_dynamic_real_features
__lowercase = num_static_real_features
__lowercase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
__lowercase = cardinality
else:
__lowercase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
__lowercase = embedding_dimension
else:
__lowercase = [min(5_0, (cat + 1) // 2 ) for cat in self.cardinality]
__lowercase = num_parallel_samples
# Transformer architecture configuration
__lowercase = input_size * len(UpperCAmelCase__ ) + self._number_of_features
__lowercase = d_model
__lowercase = encoder_attention_heads
__lowercase = decoder_attention_heads
__lowercase = encoder_ffn_dim
__lowercase = decoder_ffn_dim
__lowercase = encoder_layers
__lowercase = decoder_layers
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = activation_function
__lowercase = init_std
__lowercase = use_cache
super().__init__(is_encoder_decoder=UpperCAmelCase__, **UpperCAmelCase__ )
@property
def _lowercase ( self : Optional[Any] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 17 | 1 |
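A short instantiation sketch of the config above. The constructor derives the encoder/decoder input width as input_size * len(lags_sequence) plus the count returned by the `_number_of_features` property at the end; the public attribute name `feature_size` is assumed from the upstream transformers config:

from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(prediction_length=24)
# input_size=1, seven default lags, plus time/static/loc-scale features.
print(config.feature_size, config.context_length)  # context defaults to prediction_length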