"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id_): id_ for id_ in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
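

# Usage sketch (illustrative, not part of the original module; the local file
# names below are hypothetical placeholders for a downloaded ERNIE-M checkpoint):
#
#     tokenizer = ErnieMTokenizer(
#         sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt"
#     )
#     pieces = tokenizer.tokenize("Hello world")          # SentencePiece sub-tokens
#     ids = tokenizer.build_inputs_with_special_tokens(
#         tokenizer.convert_tokens_to_ids(pieces)
#     )                                                   # [CLS] X [SEP]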


"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")

        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )

        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
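

# Usage sketch (illustrative; the csv path is a hypothetical placeholder).
# Registered via `register_subcommand`, the command is invoked as:
#
#     transformers-cli train --train_data ./train.csv --task text_classification \
#         --model bert-base-uncased --output ./trained_model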
"""simple docstring"""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
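

# Usage sketch (illustrative): slowsort mutates its argument in place.
#
#     data = [5, 2, 4, 1, 3]
#     slowsort(data)
#     assert data == [1, 2, 3, 4, 5]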
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,
            mask_token=mask_token, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
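

# Usage sketch (illustrative, not part of the original module):
#
#     tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     pair = tok.build_inputs_with_special_tokens([10, 11], [20, 21])
#     # pair format: <s> A </s> </s> B </s>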
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Optional[Dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
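

# Usage sketch (illustrative; the class name above is inferred from the
# LeViT-style 256/224 resize, and `img` is a hypothetical PIL image):
#
#     processor = LevitImageProcessor()
#     batch = processor(images=img, return_tensors="np")
#     batch["pixel_values"].shape   # (1, 3, 224, 224)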
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
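

# Usage sketch (illustrative): with the `_LazyModule` indirection above, heavy
# submodules are imported only on first attribute access, e.g.:
#
#     from transformers.models.speech_to_text import Speech2TextProcessor
#     # triggers the import of processing_speech_to_text only at this point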
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class Swinv2ModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
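

# Usage sketch (illustrative):
#
#     pw = password_generator(12)    # 12 random chars from letters/digits/punctuation
#     is_strong_password(pw)         # True iff upper + lower + digit + special present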
from __future__ import annotations
from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
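

# Usage sketch (illustrative): exactly one of the three quantities is passed
# as 0 and is solved for via P = V * I.
#
#     electric_power(voltage=0, current=2, power=5)      # result(name='voltage', value=2.5)
#     electric_power(voltage=2.2, current=2.2, power=0)  # result(name='power', value=4.84)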
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
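

# Usage sketch (illustrative): `sort` is the public entry point; it runs
# quicksort, switches to heapsort once the recursion-depth budget is exhausted,
# and finishes small slices with insertion sort.
#
#     sort([4, 1, 3, 2])   # -> [1, 2, 3, 4]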
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
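

# Usage sketch (illustrative; the class name above is inferred from the
# OPENAI_CLIP_MEAN/STD defaults, and `img` is a hypothetical PIL image):
#
#     processor = CLIPImageProcessor()
#     batch = processor(images=img, return_tensors="np")
#     batch["pixel_values"].shape   # (1, 3, 224, 224)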
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0,
            max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft,
            hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: bool = True, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, padding: Optional[str] = "max_length", max_length: Optional[int] = None, sampling_rate: Optional[int] = None, do_normalize: Optional[bool] = None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
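
# Minimal usage sketch for the extractor above (illustrative only, not part of
# the original module; assumes 16 kHz mono input and the default 80 mel bins):
#
#     extractor = a(feature_size=80, sampling_rate=16_000)
#     speech = np.random.randn(16_000).astype(np.float32)  # one second of noise
#     features = extractor(speech, sampling_rate=16_000, return_tensors="np")
#     features["input_features"].shape  # (1, 80, 3000): one padded 30 s window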
| 104 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        config, input_ids, input_mask, head_mask = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
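
# The generation tests above are gated behind @slow; a typical invocation to run
# them (assuming a transformers checkout with TensorFlow installed and network
# access for the 564M checkpoint) would be:
#
#     RUN_SLOW=1 python -m pytest tests/models/xglm/test_modeling_tf_xglm.py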
| 61 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of a file's content, ignoring all whitespace."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Calculate mean and max line length of the content field."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if the example's hash is still in the set of unique hashes; remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking for keywords in the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test by:
    1. flagging keywords in the first few lines
    2. counting occurrences of 'config'/'test' relative to the number of lines
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if the file contains none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if the file uses the '=' symbol fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is filled only once."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
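
# Quick sanity check for the heuristics above (illustrative only, not part of
# the original pipeline): a file consisting solely of assignments should be
# caught by both the keyword and the assignment filters.
#
#     example = {"content": "x = 1\ny = 2\n"}
#     assert has_no_keywords(example) == {"has_no_keywords": True}
#     assert has_few_assignments(example) == {"has_few_assignments": True}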
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 286 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
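
    # Worked example for the vision attention shape above (illustrative numbers,
    # not taken from a specific config): with a 30x30 image and 2x2 patches,
    # num_patches = (30 // 2) * (30 // 2) = 225, so the ViT sequence length is
    # 225 + 1 = 226 once the [CLS] token is included.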
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

            fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
            self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
                self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

            pt_model_loaded.to(torch_device)
            pt_model_loaded.eval()

            with torch.no_grad():
                pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

            self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
            for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
                self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 354 |
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """
    Fractional knapsack: take items greedily by value/weight ratio.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
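
# Worked example (matches the doctest above): with capacity 50 and items of
# (value, weight) = (60, 10), (100, 20), (120, 30), the greedy order is by
# ratio 6 > 5 > 4; the first two items are taken whole (value 160) plus 20/30
# of the third (value 80), giving 240.0 in total.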
| 285 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : List[Any] = [1]
for i in range(2 , lowerCamelCase_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Any = list(range(lowerCamelCase_ ) )
# Find permutation
while factorials:
_lowerCamelCase : str = factorials.pop()
_lowerCamelCase, _lowerCamelCase : Tuple = divmod(lowerCamelCase_ , lowerCamelCase_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
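
# Worked example (illustrative): for n = 3, k = 3 the factorials list is [1, 2].
# Popping 2 gives divmod(3, 2) = (1, 1), selecting element 1 of [0, 1, 2];
# popping 1 gives divmod(1, 1) = (1, 0), selecting element 1 of [0, 2]; the
# leftover element 0 completes the permutation [1, 2, 0].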
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 96 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )
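
    # Worked example of the sequence length asserted above (illustrative, using
    # the tester defaults): a 64x64 input goes through four stride-2, kernel-3,
    # padding-1 convolutions, so height = width = 64 -> 32 -> 16 -> 8 -> 4; the
    # two "Subsample" stages then reduce the 4x4 token grid to a single token,
    # i.e. ceil(4 / 4) * ceil(4 / 4) = 1 position of width hidden_sizes[-1] = 384.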
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 207 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    segmentation_map = Image.open(dataset[1]["file"])

    return image, segmentation_map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
def A_ ( self : List[str] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 150 )
        image_processing.do_reduce_labels = True  # background label 0 is remapped to the ignore index 255
SCREAMING_SNAKE_CASE__ = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
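# A minimal sketch, assuming ADE20k-style annotations, of the remapping the
# `do_reduce_labels=True` flag exercised above is expected to apply. The helper is
# hypothetical (not the image processor's real implementation) and only shows why
# the label range moves from [0, 150] to [0, 255].
import numpy as np
def reduce_labels_sketch(label):
    label = np.asarray(label).astype('int64')
    label[label == 0] = 255  # background class 0 becomes the ignore index
    label = label - 1  # shift classes 1..150 down to 0..149
    label[label == 254] = 255  # keep the ignore index stable after the shift
    return label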
| 169 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__snake_case = logging.getLogger(__name__)
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , ) -> Tuple:
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ = bnb_quantization_config.load_in_8bit
    SCREAMING_SNAKE_CASE__ = bnb_quantization_config.load_in_4bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
SCREAMING_SNAKE_CASE__ = []
# custom device map
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE__ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE__ = get_keys_to_not_convert(UpperCamelCase_ )
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(UpperCamelCase_ )
    SCREAMING_SNAKE_CASE__ = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        SCREAMING_SNAKE_CASE__ = []
    SCREAMING_SNAKE_CASE__ = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(UpperCamelCase_ )
    # compatibility with peft
    SCREAMING_SNAKE_CASE__ = load_in_8bit
    SCREAMING_SNAKE_CASE__ = load_in_4bit
SCREAMING_SNAKE_CASE__ = get_parameter_device(UpperCamelCase_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
SCREAMING_SNAKE_CASE__ = replace_with_bnb_layers(UpperCamelCase_ , UpperCamelCase_ , modules_to_not_convert=UpperCamelCase_ )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    SCREAMING_SNAKE_CASE__ = name.replace('.weight' , '' ).replace('.bias' , '' )
                    SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
                    if param is not None:
                        param.to(torch.float32 )
elif torch.is_floating_point(UpperCamelCase_ ):
param.to(UpperCamelCase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info(
            F'The model device type is {model_device.type}. However, cuda is needed for quantization. '
            'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ = replace_with_bnb_layers(
UpperCamelCase_ , UpperCamelCase_ , modules_to_not_convert=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = get_quantized_model_device_map(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , max_memory=UpperCamelCase_ , no_split_module_classes=UpperCamelCase_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
        load_checkpoint_in_model(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=UpperCamelCase_ , offload_state_dict=UpperCamelCase_ , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(UpperCamelCase_ , device_map=UpperCamelCase_ , offload_dir=UpperCamelCase_ )
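# Hedged usage sketch for the loader above (named `load_and_quantize_model` in
# upstream accelerate); the model class and checkpoint path are placeholders,
# not real assets:
#
#     from accelerate import init_empty_weights
#     with init_empty_weights():
#         empty_model = MyModel(config)  # hypothetical model built on the meta device
#     quantized_model = load_and_quantize_model(
#         empty_model,
#         bnb_quantization_config=BnbQuantizationConfig(load_in_8bit=True),
#         weights_location='/path/to/checkpoint_folder',
#         device_map='auto',
#     )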
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]:
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
SCREAMING_SNAKE_CASE__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
} )
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = special_dtypes
SCREAMING_SNAKE_CASE__ = no_split_module_classes
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ = get_balanced_memory(
UpperCamelCase_ , low_zero=(device_map == 'balanced_low_0') , max_memory=UpperCamelCase_ , **UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ = max_memory
SCREAMING_SNAKE_CASE__ = infer_auto_device_map(UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
# check if don't have any quantized module on the cpu
        SCREAMING_SNAKE_CASE__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
SCREAMING_SNAKE_CASE__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                    if bnb_quantization_config.load_in_4bit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
                        logger.info(
                            'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
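# For reference, a custom `device_map` accepted by the helper above maps module names
# to devices; a hedged, purely illustrative example (the module names are hypothetical):
#
#     device_map = {
#         'transformer.word_embeddings': 0,  # GPU 0
#         'transformer.h': 0,
#         'lm_head': 'cpu',  # offloaded, hence covered by the cpu/disk checks above
#     }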
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]:
'''simple docstring'''
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _replace_with_bnb_layers(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ = []
current_key_name.append(UpperCamelCase_ )
if isinstance(UpperCamelCase_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE__ = '.'.join(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    SCREAMING_SNAKE_CASE__ = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=UpperCamelCase_ , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    SCREAMING_SNAKE_CASE__ = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
SCREAMING_SNAKE_CASE__ = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE__ = module.bias.data
bnb_module.requires_grad_(UpperCamelCase_ )
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _replace_with_bnb_layers(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
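# A commented sketch of the replacement pattern implemented above, applied to a toy
# module through the public wrapper (named `replace_with_bnb_layers` upstream). It
# assumes bitsandbytes is installed and a CUDA device is available; '0' and '2' are
# nn.Sequential's default child names:
#
#     toy = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))
#     cfg = BnbQuantizationConfig(load_in_8bit=True)
#     toy = replace_with_bnb_layers(toy, cfg, modules_to_not_convert=['2'])
#     # the Linear named '0' becomes bnb.nn.Linear8bitLt; the one named '2' is skipped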
def _lowercase ( UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
with init_empty_weights():
        SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase_ )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
SCREAMING_SNAKE_CASE__ = find_tied_parameters(UpperCamelCase_ )
# For compatibility with Accelerate < 0.18
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ = sum(UpperCamelCase_ , [] )
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase_ ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ = False
if hasattr(UpperCamelCase_ , 'base_model_prefix' ):
SCREAMING_SNAKE_CASE__ = not hasattr(UpperCamelCase_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ = list(model.named_children() )
SCREAMING_SNAKE_CASE__ = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ = set(UpperCamelCase_ ) - set(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = list(set(UpperCamelCase_ ) ) + list(UpperCamelCase_ )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ = ['.weight', '.bias']
SCREAMING_SNAKE_CASE__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ = name.replace(UpperCamelCase_ , '' )
filtered_module_names.append(UpperCamelCase_ )
return filtered_module_names
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
for m in model.modules():
        if isinstance(UpperCamelCase_ , bnb.nn.Linear4bit ):
return True
return False
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
return next(parameter.parameters() ).device
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
    if fp16_statistics is None:
set_module_tensor_to_device(UpperCamelCase_ , UpperCamelCase_ , 0 , dtype=UpperCamelCase_ , value=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = param_name
SCREAMING_SNAKE_CASE__ = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ = tensor_name.split('.' )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
SCREAMING_SNAKE_CASE__ = new_module
SCREAMING_SNAKE_CASE__ = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ = False
offload_weight(module._parameters[tensor_name] , UpperCamelCase_ , UpperCamelCase_ , index=UpperCamelCase_ )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , UpperCamelCase_ , index=UpperCamelCase_ , )
else:
offload_weight(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , index=UpperCamelCase_ )
offload_weight(UpperCamelCase_ , param_name.replace('weight' , 'SCB' ) , UpperCamelCase_ , index=UpperCamelCase_ )
set_module_tensor_to_device(UpperCamelCase_ , UpperCamelCase_ , 'meta' , dtype=UpperCamelCase_ , value=torch.empty(*param.size() ) )
| 169 | 1 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _A ( a__):
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCAmelCase , 'width_multiplier' ) )
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE="swish" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.25 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size
SCREAMING_SNAKE_CASE_ : List[str] = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : List[str] = make_divisible(512 * width_multiplier , divisor=8 )
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE_ : List[Any] = conv_kernel_size
SCREAMING_SNAKE_CASE_ : Dict = output_stride
SCREAMING_SNAKE_CASE_ : List[Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ : str = use_labels
SCREAMING_SNAKE_CASE_ : List[Any] = is_training
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = scope
SCREAMING_SNAKE_CASE_ : List[Any] = width_multiplier
SCREAMING_SNAKE_CASE_ : Optional[int] = ffn_dropout
SCREAMING_SNAKE_CASE_ : int = attn_dropout
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ):
"""simple docstring"""
return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = MobileViTVaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Dict = MobileViTVaForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = MobileViTVaForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
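# For reference, `make_divisible(512 * width_multiplier, divisor=8)` used in the tester
# above rounds a channel count to a hardware-friendly multiple of 8. A sketch of the
# standard MobileNet-style rule (the upstream helper may differ in detail):
def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never round down by more than 10%
        new_value += divisor
    return new_value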
@require_torch
class _A ( a__ , a__ , unittest.TestCase):
SCREAMING_SNAKE_CASE : Any = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = MobileViTVaModelTester(self )
SCREAMING_SNAKE_CASE_ : Any = MobileViTVaConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def UpperCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase ( self ):
"""simple docstring"""
pass
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : str = model_class(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def UpperCAmelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Optional[int] = 5
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE_ : str = 2
for i in range(len(__lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : Tuple = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Dict = MobileViTVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase):
@cached_property
def UpperCAmelCase ( self ):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE_ : int = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(**__lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE_ : int = model.to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE_ : str = prepare_img()
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=__lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE_ : List[str] = model.to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE_ : int = prepare_img()
SCREAMING_SNAKE_CASE_ : List[str] = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ : Any = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase , target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
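# For reference, without `target_sizes` the post-processing above reduces the class
# dimension roughly like this hedged sketch (when `target_sizes` is given, the real
# method also upsamples the logits first):
#
#     semantic_map = outputs.logits.argmax(dim=1)  # (batch, height, width) class ids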
| 253 | '''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__a: Tuple = None
__a: Tuple = logging.get_logger(__name__)
__a: Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__a: Optional[Any] = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
__a: Tuple = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE = TaTokenizer
SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase=100 , __lowerCAmelCase=None , **__lowerCAmelCase , ) -> Union[str, Any]:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowercase__ : Union[str, Any] = [F"""<extra_id_{i}>""" for i in range(__lowerCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase__ : Dict = len(set(filter(lambda __lowerCAmelCase : bool('''extra_id_''' in str(__lowerCAmelCase ) ) , __lowerCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
__lowerCAmelCase , tokenizer_file=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , extra_ids=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase__ : Union[str, Any] = vocab_file
lowercase__ : Optional[int] = False if not self.vocab_file else True
lowercase__ : Any = extra_ids
@staticmethod
def _lowerCAmelCase( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowercase__ : Any = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __lowerCAmelCase , )
return max_model_length
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : List[Any] = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ):
copyfile(self.vocab_file , __lowerCAmelCase )
logger.info(F"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> List[int]:
        lowercase__ : Any = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            lowercase__ : Dict = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> List[int]:
        lowercase__ : Optional[int] = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def _lowerCAmelCase( self ) -> List[Any]:
return list(
set(filter(lambda __lowerCAmelCase : bool(re.search(r'''<extra_id_\d+>''' , __lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCAmelCase( self ) -> Tuple:
return [self.convert_tokens_to_ids(__lowerCAmelCase ) for token in self.get_sentinel_tokens()]
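# Hedged usage sketch of the sentinel tokens handled above, using the upstream class
# name T5TokenizerFast (outputs are illustrative; get_sentinel_tokens is unordered):
#
#     tok = T5TokenizerFast.from_pretrained('t5-small')
#     tok.get_sentinel_tokens()  # e.g. ['<extra_id_0>', ..., '<extra_id_99>']
#     tok('The <extra_id_0> walks in <extra_id_1> park')  # span-corruption style input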
| 198 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class lowercase ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
a__ : List[Any] = "table-transformer"
a__ : Optional[int] = ["past_key_values"]
a__ : Any = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Tuple , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : int=100 , __UpperCAmelCase : str=6 , __UpperCAmelCase : List[str]=2_048 , __UpperCAmelCase : Optional[Any]=8 , __UpperCAmelCase : int=6 , __UpperCAmelCase : Tuple=2_048 , __UpperCAmelCase : Union[str, Any]=8 , __UpperCAmelCase : Tuple=0.0 , __UpperCAmelCase : List[str]=0.0 , __UpperCAmelCase : str=True , __UpperCAmelCase : Optional[int]="relu" , __UpperCAmelCase : Tuple=256 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : List[Any]=0.0 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : List[Any]=1.0 , __UpperCAmelCase : Dict=False , __UpperCAmelCase : List[str]="sine" , __UpperCAmelCase : int="resnet50" , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : List[Any]=1 , __UpperCAmelCase : List[str]=5 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : str=1 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Tuple=5 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : int=0.1 , **__UpperCAmelCase : List[str] , ) -> Union[str, Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCAmelCase_= CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_= backbone_config.get("""model_type""" )
UpperCAmelCase_= CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_= config_class.from_dict(__UpperCAmelCase )
# set timm attributes to None
UpperCAmelCase_= None, None, None
UpperCAmelCase_= use_timm_backbone
UpperCAmelCase_= backbone_config
UpperCAmelCase_= num_channels
UpperCAmelCase_= num_queries
UpperCAmelCase_= d_model
UpperCAmelCase_= encoder_ffn_dim
UpperCAmelCase_= encoder_layers
UpperCAmelCase_= encoder_attention_heads
UpperCAmelCase_= decoder_ffn_dim
UpperCAmelCase_= decoder_layers
UpperCAmelCase_= decoder_attention_heads
UpperCAmelCase_= dropout
UpperCAmelCase_= attention_dropout
UpperCAmelCase_= activation_dropout
UpperCAmelCase_= activation_function
UpperCAmelCase_= init_std
UpperCAmelCase_= init_xavier_std
UpperCAmelCase_= encoder_layerdrop
UpperCAmelCase_= decoder_layerdrop
UpperCAmelCase_= encoder_layers
UpperCAmelCase_= auxiliary_loss
UpperCAmelCase_= position_embedding_type
UpperCAmelCase_= backbone
UpperCAmelCase_= use_pretrained_backbone
UpperCAmelCase_= dilation
# Hungarian matcher
UpperCAmelCase_= class_cost
UpperCAmelCase_= bbox_cost
UpperCAmelCase_= giou_cost
# Loss coefficients
UpperCAmelCase_= mask_loss_coefficient
UpperCAmelCase_= dice_loss_coefficient
UpperCAmelCase_= bbox_loss_coefficient
UpperCAmelCase_= giou_loss_coefficient
UpperCAmelCase_= eos_coefficient
super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return self.d_model
class lowercase ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
a__ : Dict = version.parse("1.11")
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> float:
return 1E-5
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return 12
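# Hedged instantiation sketch for the configuration above (upstream name
# TableTransformerConfig; the values shown are just the defaults from __init__):
#
#     config = TableTransformerConfig(num_queries=100, d_model=256, backbone='resnet50')
#     config.hidden_size  # -> 256, aliased to d_model via attribute_map
#     config.num_attention_heads  # -> 8, aliased to encoder_attention_heads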
| 351 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase ( snake_case__):
"""simple docstring"""
def __init__( self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] ) -> List[str]:
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCAmelCase_= DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self : Union[str, Any] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : int = 50 , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __UpperCAmelCase ):
UpperCAmelCase_= (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
UpperCAmelCase_= (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__UpperCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase_= randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase_= self.scheduler.step(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , eta=__UpperCAmelCase , use_clipped_model_output=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
UpperCAmelCase_= (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_= image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_= self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
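# Hedged usage sketch for the pipeline above (upstream name DDIMPipeline; the hub id
# is a real DDPM checkpoint, given only as an example):
#
#     pipe = DDIMPipeline.from_pretrained('google/ddpm-cifar10-32')
#     images = pipe(batch_size=4, num_inference_steps=50, eta=0.0).images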
| 277 | 0 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : int = BertJapaneseTokenizer
lowerCAmelCase : Dict = False
lowerCAmelCase : str = True
def UpperCAmelCase ( self : str ) -> str:
"""simple docstring"""
super().setUp()
lowercase__ : Optional[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
lowercase__ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ) -> str:
"""simple docstring"""
lowercase__ : str = '''こんにちは、世界。 \nこんばんは、世界。'''
lowercase__ : Any = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def UpperCAmelCase ( self : Optional[int] ,_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[Any] = self.get_input_output_texts(_snake_case )
lowercase__ : int = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
lowercase__ : Optional[Any] = tokenizer.decode(_snake_case ,clean_up_tokenization_spaces=_snake_case )
return text, ids
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Tuple = self.tokenizer_class(self.vocab_file )
lowercase__ : Any = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(_snake_case ,['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='''mecab''' )
self.assertIsNotNone(_snake_case )
lowercase__ : Optional[Any] = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ : Optional[int] = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ : Dict = os.path.join(self.tmpdirname ,'''tokenizer.bin''' )
with open(_snake_case ,'''wb''' ) as handle:
pickle.dump(_snake_case ,_snake_case )
with open(_snake_case ,'''rb''' ) as handle:
lowercase__ : int = pickle.load(_snake_case )
lowercase__ : Optional[Any] = tokenizer_new.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Tuple = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] ,)
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
try:
lowercase__ : Optional[Any] = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] ,)
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
try:
lowercase__ : Dict = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] ,)
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : int = MecabTokenizer(do_lower_case=_snake_case ,mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] ,)
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
try:
lowercase__ : Tuple = MecabTokenizer(
do_lower_case=_snake_case ,normalize_text=_snake_case ,mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] ,)
def UpperCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
lowercase__ : str = MecabTokenizer(normalize_text=_snake_case ,mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] ,)
@require_sudachi
def UpperCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(_snake_case )
lowercase__ : Tuple = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ : Optional[int] = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname ,'''tokenizer.bin''' )
with open(_snake_case ,'''wb''' ) as handle:
pickle.dump(_snake_case ,_snake_case )
with open(_snake_case ,'''rb''' ) as handle:
lowercase__ : Optional[int] = pickle.load(_snake_case )
lowercase__ : Dict = tokenizer_new.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
@require_sudachi
def UpperCAmelCase ( self : str ) -> str:
"""simple docstring"""
lowercase__ : int = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,[''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] ,)
@require_sudachi
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = SudachiTokenizer(sudachi_dict_type='''core''' ,sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) ,['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : int = SudachiTokenizer(sudachi_dict_type='''core''' ,sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) ,['''外国人''', '''参政権'''] )
@require_sudachi
def UpperCAmelCase ( self : str ) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = SudachiTokenizer(sudachi_dict_type='''core''' ,sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) ,['''外国人参政権'''] )
@require_sudachi
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = SudachiTokenizer(do_lower_case=_snake_case ,sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,[''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] ,)
@require_sudachi
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ : Optional[Any] = SudachiTokenizer(normalize_text=_snake_case ,sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,[''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] ,)
@require_sudachi
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SudachiTokenizer(trim_whitespace=_snake_case ,sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] ,)
@require_jumanpp
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : str = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(_snake_case )
lowercase__ : Dict = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ : List[Any] = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ : int = os.path.join(self.tmpdirname ,'''tokenizer.bin''' )
with open(_snake_case ,'''wb''' ) as handle:
pickle.dump(_snake_case ,_snake_case )
with open(_snake_case ,'''rb''' ) as handle:
lowercase__ : Optional[int] = pickle.load(_snake_case )
lowercase__ : Tuple = tokenizer_new.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
@require_jumanpp
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[str] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] ,)
@require_jumanpp
def UpperCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = JumanppTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] ,)
@require_jumanpp
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Any = JumanppTokenizer(normalize_text=_snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] ,)
@require_jumanpp
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ : int = JumanppTokenizer(trim_whitespace=_snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) ,['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] ,)
@require_jumanpp
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : int = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) ,['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] ,)
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
lowercase__ : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowercase__ : Dict = {}
for i, token in enumerate(_snake_case ):
lowercase__ : Any = i
lowercase__ : Any = WordpieceTokenizer(vocab=_snake_case ,unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) ,[] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) ,['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) ,['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) ,['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Dict = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowercase__ : Union[str, Any] = tokenizer.subword_tokenizer
lowercase__ : Tuple = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(_snake_case ,['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowercase__ : List[Any] = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(_snake_case ,['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")
        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = BertJapaneseTokenizer
lowerCAmelCase : Tuple = False
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase ( self : Optional[Any] ,**_snake_case : int ) -> Optional[Any]:
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname ,subword_tokenizer_type='''character''' ,**_snake_case )
def UpperCAmelCase ( self : Any ,_snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase ( self : Any ) -> str:
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")
        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize('''''' ) ,[] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) ,['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) ,['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 16 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a_ : str = logging.get_logger(__name__)
a_ : Tuple = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """layoutlmv3"""
def __init__( self , __magic_name__=5_02_65 , __magic_name__=7_68 , __magic_name__=12 , __magic_name__=12 , __magic_name__=30_72 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_12 , __magic_name__=2 , __magic_name__=0.0_2 , __magic_name__=1e-5 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__=10_24 , __magic_name__=1_28 , __magic_name__=1_28 , __magic_name__=True , __magic_name__=32 , __magic_name__=1_28 , __magic_name__=64 , __magic_name__=2_56 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=2_24 , __magic_name__=3 , __magic_name__=16 , __magic_name__=None , **__magic_name__ , ) -> Dict:
super().__init__(
vocab_size=__magic_name__ , hidden_size=__magic_name__ , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , intermediate_size=__magic_name__ , hidden_act=__magic_name__ , hidden_dropout_prob=__magic_name__ , attention_probs_dropout_prob=__magic_name__ , max_position_embeddings=__magic_name__ , type_vocab_size=__magic_name__ , initializer_range=__magic_name__ , layer_norm_eps=__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ , )
_a = max_ad_position_embeddings
_a = coordinate_size
_a = shape_size
_a = has_relative_attention_bias
_a = rel_pos_bins
_a = max_rel_pos
_a = has_spatial_attention_bias
_a = rel_ad_pos_bins
_a = max_rel_ad_pos
_a = text_embed
_a = visual_embed
_a = input_size
_a = num_channels
_a = patch_size
_a = classifier_dropout
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = version.parse("""1.12""" )
@property
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def __UpperCAmelCase ( self ) -> float:
return 1e-5
@property
def __UpperCAmelCase ( self ) -> int:
return 12
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , __magic_name__ = 3 , __magic_name__ = 40 , __magic_name__ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , __magic_name__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = processor.tokenizer.num_special_tokens_to_add(__magic_name__ )
_a = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
_a = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_a = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_a = self._generate_dummy_images(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_a = dict(
processor(
__magic_name__ , text=__magic_name__ , boxes=__magic_name__ , return_tensors=__magic_name__ , ) )
return inputs
| 168 | 0 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        # Start from a white canvas of the destination size.
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        # Each destination pixel copies its nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
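# Worked example with illustrative numbers: downscaling a 400x300 source to
# 200x150 gives ratio_x = ratio_y = 2.0, so output pixel (i, j) simply copies
# source pixel (2 * i, 2 * j), which is exactly the nearest-neighbour rule.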
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
| 237 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n) -> float:
    # Greedy fractional knapsack: sort items by value/weight ratio, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
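# Sanity check on the classic instance (values, weights, capacity, n):
# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) packs the two best
# value/weight items whole (value 160) plus 20/30 of the third, giving 240.0.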
if __name__ == "__main__":
import doctest
doctest.testmod()
| 237 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    main()
| 262 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_UpperCAmelCase : Optional[Any] =NewType("""DataClass""", Any)
_UpperCAmelCase : Dict =NewType("""DataClassType""", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Iterable[DataClassType]
def __init__( self , __lowercase , **__lowercase ) -> List[str]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowerCAmelCase_ : Optional[int] = ArgumentDefaultsHelpFormatter
super().__init__(**__lowercase )
if dataclasses.is_dataclass(__lowercase ):
lowerCAmelCase_ : Union[str, Any] = [dataclass_types]
lowerCAmelCase_ : List[Any] = list(__lowercase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__lowercase )
@staticmethod
def lowercase_ ( __lowercase , __lowercase ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[Any] = f"""--{field.name}"""
lowerCAmelCase_ : Tuple = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __lowercase ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
lowerCAmelCase_ : List[str] = kwargs.pop('''aliases''' , [] )
if isinstance(__lowercase , __lowercase ):
lowerCAmelCase_ : Optional[Any] = [aliases]
lowerCAmelCase_ : Any = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__lowercase , '''UnionType''' ) and isinstance(__lowercase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__lowercase ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(__lowercase ) not in field.type.__args__:
# filter `str` in Union
lowerCAmelCase_ : List[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCAmelCase_ : Dict = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCAmelCase_ : str = (
field.type.__args__[0] if isinstance(__lowercase , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCAmelCase_ : List[Any] = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCAmelCase_ : Dict = {}
if origin_type is Literal or (isinstance(field.type , __lowercase ) and issubclass(field.type , __lowercase )):
if origin_type is Literal:
lowerCAmelCase_ : Optional[Any] = field.type.__args__
else:
lowerCAmelCase_ : int = [x.value for x in field.type]
lowerCAmelCase_ : str = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
lowerCAmelCase_ : str = field.default
else:
lowerCAmelCase_ : Tuple = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCAmelCase_ : Tuple = copy(__lowercase )
# Hack because type=bool in argparse does not behave as we want.
lowerCAmelCase_ : Dict = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCAmelCase_ : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCAmelCase_ : List[str] = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCAmelCase_ : int = '''?'''
# This is the value that will get picked if we do --field_name (without value)
lowerCAmelCase_ : List[Any] = True
elif isclass(__lowercase ) and issubclass(__lowercase , __lowercase ):
lowerCAmelCase_ : Union[str, Any] = field.type.__args__[0]
lowerCAmelCase_ : Dict = '''+'''
if field.default_factory is not dataclasses.MISSING:
lowerCAmelCase_ : Any = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCAmelCase_ : Optional[int] = True
else:
lowerCAmelCase_ : List[Any] = field.type
if field.default is not dataclasses.MISSING:
lowerCAmelCase_ : Dict = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCAmelCase_ : List[Any] = field.default_factory()
else:
lowerCAmelCase_ : int = True
parser.add_argument(__lowercase , *__lowercase , **__lowercase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCAmelCase_ : Any = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__lowercase )
def lowercase_ ( self , __lowercase ) -> List[Any]:
if hasattr(__lowercase , '''_argument_group_name''' ):
lowerCAmelCase_ : str = self.add_argument_group(dtype._argument_group_name )
else:
lowerCAmelCase_ : Dict = self
try:
lowerCAmelCase_ : Dict[str, type] = get_type_hints(__lowercase )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(__lowercase ):
lowerCAmelCase_ : Any = '''.'''.join(map(__lowercase , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__lowercase ):
if not field.init:
continue
lowerCAmelCase_ : Optional[int] = type_hints[field.name]
self._parse_dataclass_field(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=None , __lowercase=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCAmelCase_ : str = []
if args_filename:
args_files.append(Path(__lowercase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCAmelCase_ : str = ArgumentParser()
args_file_parser.add_argument(__lowercase , type=__lowercase , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = args_file_parser.parse_known_args(args=__lowercase )
lowerCAmelCase_ : int = vars(__lowercase ).get(args_file_flag.lstrip('''-''' ) , __lowercase )
if cmd_args_file_paths:
args_files.extend([Path(__lowercase ) for p in cmd_args_file_paths] )
lowerCAmelCase_ : Dict = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCAmelCase_ : Any = file_args + args if args is not None else file_args + sys.argv[1:]
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.parse_known_args(args=__lowercase )
lowerCAmelCase_ : Any = []
for dtype in self.dataclass_types:
lowerCAmelCase_ : str = {f.name for f in dataclasses.fields(__lowercase ) if f.init}
lowerCAmelCase_ : str = {k: v for k, v in vars(__lowercase ).items() if k in keys}
for k in keys:
delattr(__lowercase , __lowercase )
lowerCAmelCase_ : Optional[int] = dtype(**__lowercase )
outputs.append(__lowercase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__lowercase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def lowercase_ ( self , __lowercase , __lowercase = False ) -> Tuple[DataClass, ...]:
lowerCAmelCase_ : int = set(args.keys() )
lowerCAmelCase_ : str = []
for dtype in self.dataclass_types:
lowerCAmelCase_ : int = {f.name for f in dataclasses.fields(__lowercase ) if f.init}
lowerCAmelCase_ : List[str] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCAmelCase_ : List[str] = dtype(**__lowercase )
outputs.append(__lowercase )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__lowercase )}""" )
return tuple(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = False ) -> Tuple[DataClass, ...]:
with open(Path(__lowercase ) , encoding='''utf-8''' ) as open_json_file:
lowerCAmelCase_ : Dict = json.loads(open_json_file.read() )
lowerCAmelCase_ : str = self.parse_dict(__lowercase , allow_extra_keys=__lowercase )
return tuple(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = False ) -> Tuple[DataClass, ...]:
lowerCAmelCase_ : Optional[Any] = self.parse_dict(yaml.safe_load(Path(__lowercase ).read_text() ) , allow_extra_keys=__lowercase )
return tuple(__lowercase ) | 262 | 1 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
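# For contrast with the stack-based DFS above, here is a breadth-first sketch
# (not part of the original module): swapping the stack for a FIFO queue is the
# only structural change needed to visit nodes level by level.
from collections import deque


def breadth_first_search(graph: dict, start: str) -> set[str]:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()  # FIFO pop: nearest unexplored nodes come out first
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored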
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 26 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 26 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
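# In short (values here are illustrative): Dataset.from_list([{"a": 1}, {"b": "x"}])
# keeps only the keys of the first record, so the second row becomes {"a": None},
# while list-valued column types may still be refined by later records, as
# test_variable_list_records above shows.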
| 14 |
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
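# With the nine-node example tree built in main() below, node 5 has only a left
# child, so is_full_binary_tree(tree) prints False, while depth_of_tree(tree)
# prints 5 (the longest root-to-leaf path is 1 -> 3 -> 7 -> 8 -> 9).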
def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
| 109 | 0 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        # Start from a white canvas of the destination size.
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        # Each destination pixel copies its nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
    destroyAllWindows()
| 196 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase : List[Any] =logging.get_logger(__name__)
lowerCamelCase : Optional[Any] ={
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class __a ( A__ , A__ ):
_lowerCAmelCase : Union[str, Any] = '''bit'''
_lowerCAmelCase : List[str] = ['''preactivation''', '''bottleneck''']
_lowerCAmelCase : Any = ['''SAME''', '''VALID''']
def __init__( self : str , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=64 , SCREAMING_SNAKE_CASE : List[Any]=[2_56, 5_12, 10_24, 20_48] , SCREAMING_SNAKE_CASE : Union[str, Any]=[3, 4, 6, 3] , SCREAMING_SNAKE_CASE : str="preactivation" , SCREAMING_SNAKE_CASE : Any="relu" , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : List[Any]=32 , SCREAMING_SNAKE_CASE : Tuple=0.0 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , **SCREAMING_SNAKE_CASE : int , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
UpperCamelCase__ : Any = global_padding.upper()
else:
raise ValueError(F'Padding strategy {global_padding} not supported' )
UpperCamelCase__ : Dict = num_channels
UpperCamelCase__ : Dict = embedding_size
UpperCamelCase__ : Tuple = hidden_sizes
UpperCamelCase__ : Any = depths
UpperCamelCase__ : Optional[int] = layer_type
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : str = global_padding
UpperCamelCase__ : Any = num_groups
UpperCamelCase__ : str = drop_path_rate
UpperCamelCase__ : Optional[Any] = embedding_dynamic_padding
UpperCamelCase__ : Tuple = output_stride
UpperCamelCase__ : List[str] = width_factor
UpperCamelCase__ : Any = ["stem"] + [F'stage{idx}' for idx in range(1 , len(SCREAMING_SNAKE_CASE ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = get_aligned_output_features_output_indices(
            out_features=SCREAMING_SNAKE_CASE , out_indices=SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 196 | 1
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
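# Worked example of Brian Kernighan's trick: for number = 0b10110 (22), each
# `number &= number - 1` clears the lowest set bit,
# 0b10110 -> 0b10100 -> 0b10000 -> 0b00000, so the function returns 3.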
if __name__ == "__main__":
import doctest
doctest.testmod()
| 201 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
def UpperCamelCase__ ( self, __magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
with open(__magic_name__, encoding='''utf-8''' ) as input_file:
UpperCamelCase__ : Tuple = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
UpperCamelCase__ : str = input_file.read()
UpperCamelCase__ : List[Any] = regexp.search(__magic_name__ )
return match
def UpperCamelCase__ ( self, __magic_name__ ) -> Any:
"""simple docstring"""
with open(__magic_name__, encoding='''utf-8''' ) as input_file:
UpperCamelCase__ : Dict = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''', re.DOTALL )
UpperCamelCase__ : Any = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCamelCase__ : Tuple = regexp.finditer(__magic_name__ )
UpperCamelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : int = Path('''./datasets''' )
UpperCamelCase__ : Any = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__magic_name__ ) ):
raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = Path('''./datasets''' )
UpperCamelCase__ : Optional[Any] = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(__magic_name__ ) ):
raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 201 | 1 |
def count_inversions_bf(arr):
    # Brute force: examine every ordered pair (i, j) with i < j.
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
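# Worked example: merging p = [3, 5] with q = [2, 4] counts three cross
# inversions, namely (3, 2), (5, 2) and (5, 4), and returns ([2, 3, 4, 5], 3).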
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 356 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
UpperCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
UpperCamelCase = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
UpperCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
UpperCamelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
UpperCamelCase = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Any = DPRContextEncoderTokenizer
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Dict = VOCAB_FILES_NAMES
UpperCamelCase_ : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Tuple = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Optional[int] = DPRQuestionEncoderTokenizer
UpperCamelCase = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
UpperCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
UpperCamelCase = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(UpperCAmelCase_ )
class lowerCAmelCase_ :
'''simple docstring'''
def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
elif titles is None or texts is None:
A: Union[str, Any] = titles if texts is None else texts
return super().__call__(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
A: Union[str, Any] = titles if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [titles]
A: Optional[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [texts]
A: str = len(SCREAMING_SNAKE_CASE_ )
A: List[Any] = questions if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [questions] * n_passages
assert len(SCREAMING_SNAKE_CASE_ ) == len(
            SCREAMING_SNAKE_CASE_ ), f"""There should be as many titles as texts but got {len(SCREAMING_SNAKE_CASE_ )} titles and {len(SCREAMING_SNAKE_CASE_ )} texts."""
A: Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids''']
A: Dict = super().__call__(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids''']
A: str = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
}
if return_attention_mask is not False:
A: Union[str, Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
A: Optional[Any] = attention_mask
return self.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : BatchEncoding , SCREAMING_SNAKE_CASE_ : DPRReaderOutput , SCREAMING_SNAKE_CASE_ : int = 16 , SCREAMING_SNAKE_CASE_ : int = 64 , SCREAMING_SNAKE_CASE_ : int = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
A: Any = reader_input['''input_ids''']
A , A , A: str = reader_output[:3]
A: str = len(SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = sorted(range(SCREAMING_SNAKE_CASE_ ) , reverse=SCREAMING_SNAKE_CASE_ , key=relevance_logits.__getitem__ )
A: List[DPRReaderOutput] = []
for doc_id in sorted_docs:
A: List[str] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
A: Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A: Union[str, Any] = sequence_ids.index(self.pad_token_id )
else:
A: int = len(SCREAMING_SNAKE_CASE_ )
A: Dict = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE_ , top_spans=SCREAMING_SNAKE_CASE_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE_ , start_index=SCREAMING_SNAKE_CASE_ , end_index=SCREAMING_SNAKE_CASE_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(SCREAMING_SNAKE_CASE_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
A: Union[str, Any] = []
for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
A: Any = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] , reverse=SCREAMING_SNAKE_CASE_ )
A: Dict = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
A: int = end_index - start_index + 1
assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(SCREAMING_SNAKE_CASE_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase_ )
class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES
UpperCamelCase_ : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Union[str, Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Dict = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Any = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ : Optional[Any] = DPRReaderTokenizer
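# Illustrative usage of the fast reader tokenizer defined above (named
# DPRReaderTokenizerFast upstream). The checkpoint and inputs follow the
# upstream documentation and are examples only, not part of this file:
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love ?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )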
| 334 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Dict = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "roc_bert"
def __init__( self : int , lowercase : Optional[Any]=30_522 , lowercase : Tuple=768 , lowercase : Optional[int]=12 , lowercase : Union[str, Any]=12 , lowercase : List[str]=3_072 , lowercase : Dict="gelu" , lowercase : List[Any]=0.1 , lowercase : List[str]=0.1 , lowercase : Dict=512 , lowercase : List[str]=2 , lowercase : Optional[int]=0.02 , lowercase : int=1E-12 , lowercase : List[str]=True , lowercase : int=0 , lowercase : str="absolute" , lowercase : Tuple=None , lowercase : str=True , lowercase : Union[str, Any]=True , lowercase : Dict=768 , lowercase : Optional[Any]=910 , lowercase : List[str]=512 , lowercase : int=24_858 , lowercase : List[str]=True , **lowercase : Dict , ):
'''simple docstring'''
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = type_vocab_size
_snake_case = layer_norm_eps
_snake_case = use_cache
_snake_case = enable_pronunciation
_snake_case = enable_shape
_snake_case = pronunciation_embed_dim
_snake_case = pronunciation_vocab_size
_snake_case = shape_embed_dim
_snake_case = shape_vocab_size
_snake_case = concat_input
_snake_case = position_embedding_type
_snake_case = classifier_dropout
super().__init__(pad_token_id=lowercase , **lowercase ) | 282 |
def a_ ( __lowercase : str ) -> int:
    hex_num = __lowercase.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function' )
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError('Invalid value was passed to the function' )
    if int_num == 0:
        # edge case: the loop below would leave bin_str empty, and int('') raises
        return 0
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(('-' + bin_str) if is_negative else bin_str )
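# Hedged sanity checks, assuming the function mirrors bin() for hex input and returns
# the binary digits as a plain int:
assert a_('AC') == 10101100
assert a_('-fb1') == -111110110001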
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCamelCase__ =logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_SCREAMING_SNAKE_CASE : int = torch.zeros(__lowerCamelCase , __lowerCamelCase )
else:
_SCREAMING_SNAKE_CASE : Tuple = None
_SCREAMING_SNAKE_CASE : List[str] = torch.nn.Parameter(__lowerCamelCase )
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 4_2
__snake_case = 4_2
__snake_case = 4_2
__snake_case = 4_2
__snake_case = 4_2
__snake_case = 4_2
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
vqvae=__lowerCamelCase , transformer=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , scheduler=__lowerCamelCase , learned_classifier_free_sampling_embeddings=__lowerCamelCase , )
    def _encode_prompt( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Tuple = len(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else 1
# get prompt text embeddings
_SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
_SCREAMING_SNAKE_CASE : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_SCREAMING_SNAKE_CASE : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
_SCREAMING_SNAKE_CASE : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_SCREAMING_SNAKE_CASE : Optional[int] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__lowerCamelCase )
# duplicate text embeddings for each generation per prompt
_SCREAMING_SNAKE_CASE : Optional[int] = prompt_embeds.repeat_interleave(__lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt_embeds.unsqueeze(0 ).repeat(__lowerCamelCase , 1 , 1 )
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [''] * batch_size
_SCREAMING_SNAKE_CASE : Optional[int] = text_input_ids.shape[-1]
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" , )
_SCREAMING_SNAKE_CASE : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__lowerCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_SCREAMING_SNAKE_CASE : List[str] = negative_prompt_embeds.shape[1]
_SCREAMING_SNAKE_CASE : int = negative_prompt_embeds.repeat(1 , __lowerCamelCase , 1 )
_SCREAMING_SNAKE_CASE : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __lowerCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_SCREAMING_SNAKE_CASE : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , __lowerCamelCase , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 5.0 , __lowerCamelCase = 1.0 , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = 1 , ) -> Optional[Any]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = 1
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = len(__lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__lowerCamelCase )}""" )
_SCREAMING_SNAKE_CASE : str = batch_size * num_images_per_prompt
_SCREAMING_SNAKE_CASE : List[Any] = guidance_scale > 1.0
_SCREAMING_SNAKE_CASE : List[str] = self._encode_prompt(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCamelCase , __lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__lowerCamelCase )}.""" )
# get the initial completely masked latents unless the user supplied it
_SCREAMING_SNAKE_CASE : Tuple = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_SCREAMING_SNAKE_CASE : Tuple = self.transformer.num_vector_embeds - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.full(__lowerCamelCase , __lowerCamelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
F""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
_SCREAMING_SNAKE_CASE : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device )
_SCREAMING_SNAKE_CASE : int = self.scheduler.timesteps.to(self.device )
_SCREAMING_SNAKE_CASE : Tuple = latents
for i, t in enumerate(self.progress_bar(__lowerCamelCase ) ):
# expand the sample if we are doing classifier free guidance
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
            model_output = self.transformer(__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , timestep=__lowerCamelCase ).sample
            if do_classifier_free_guidance:
                model_output_uncond , model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )
_SCREAMING_SNAKE_CASE : Dict = self.truncate(__lowerCamelCase , __lowerCamelCase )
# remove `log(0)`'s (`-inf`s)
_SCREAMING_SNAKE_CASE : Optional[int] = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
_SCREAMING_SNAKE_CASE : Any = self.scheduler.step(__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.vqvae.config.vq_embed_dim
_SCREAMING_SNAKE_CASE : Dict = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_SCREAMING_SNAKE_CASE : Optional[int] = self.vqvae.quantize.get_codebook_entry(__lowerCamelCase , shape=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.vqvae.decode(__lowerCamelCase , force_not_quantize=__lowerCamelCase ).sample
_SCREAMING_SNAKE_CASE : Any = (image / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
    def truncate( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = torch.sort(__lowerCamelCase , 1 , descending=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = torch.exp(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_SCREAMING_SNAKE_CASE : List[str] = torch.full_like(keep_mask[:, 0:1, :] , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = torch.cat((all_true, keep_mask) , dim=1 )
_SCREAMING_SNAKE_CASE : List[Any] = keep_mask[:, :-1, :]
_SCREAMING_SNAKE_CASE : str = keep_mask.gather(1 , indices.argsort(1 ) )
_SCREAMING_SNAKE_CASE : List[str] = log_p_x_0.clone()
_SCREAMING_SNAKE_CASE : List[str] = -torch.inf # -inf = log(0)
return rv | 362 |
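# A hedged standalone sketch of the probability truncation performed by the method above:
# per pixel, keep only the most probable classes whose cumulative probability stays below
# `truncation_rate` (always retaining the single most likely class) and set the rest to
# log(0) = -inf. Shape assumed to be (batch, classes, pixels); all names are illustrative.
import torch

def _truncate_sketch(log_p_x_0: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    keep_mask = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # shift the mask one slot so the largest probability is always kept
    all_true = torch.full_like(keep_mask[:, 0:1, :], True)
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    # undo the sort so the mask lines up with the original class order
    keep_mask = keep_mask.gather(1, indices.argsort(1))
    rv = log_p_x_0.clone()
    rv[~keep_mask] = float('-inf')
    return rv
# e.g. with per-pixel probabilities (0.6, 0.3, 0.1) and truncation_rate=0.8,
# the 0.6 and 0.3 classes survive and the 0.1 class is zeroed out.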
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['vqvae']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase )
    def get_default_steps( self ) -> int:
return 5_0 if isinstance(self.scheduler , __lowerCamelCase ) else 1_0_0_0
@torch.no_grad()
def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_SCREAMING_SNAKE_CASE : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowerCamelCase , device=self.device , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = noise
_SCREAMING_SNAKE_CASE : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"]
else:
_SCREAMING_SNAKE_CASE : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
if isinstance(self.scheduler , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" )
_SCREAMING_SNAKE_CASE : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )
_SCREAMING_SNAKE_CASE : Tuple = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowerCamelCase )
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : Any = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def UpperCamelCase_ ( xa , xb , alpha ) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta ) | 325 | 0 |
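# Hedged numeric check of the spherical interpolation (slerp) defined just above, written
# standalone so it does not depend on the pipeline class; all names are illustrative.
import torch
from math import acos, sin
_x0 = torch.tensor([1.0, 0.0])
_x1 = torch.tensor([0.0, 1.0])
_theta = acos(torch.dot(_x0, _x1) / torch.norm(_x0) / torch.norm(_x1))  # orthogonal vectors -> pi / 2
_mid = sin(0.5 * _theta) * _x0 / sin(_theta) + sin(0.5 * _theta) * _x1 / sin(_theta)
assert torch.allclose(_mid, torch.tensor([0.7071, 0.7071]), atol=1e-3)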
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length : int = 8 ):
    chars = ascii_letters + digits + punctuation
    return ''.join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator(chars_incl : str , i : int ):
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    chars_list = list(chars )
    shuffle(chars_list )
    return ''.join(chars_list )
# random is a generalised function for letters, characters and numbers
def random(chars : str , number_of_chars : int ):
    return ''.join(secrets.choice(chars ) for _ in range(number_of_chars ) )
def random_number(chars_incl : str , i : int ):
    pass # Put your code here...
def random_letters(chars_incl : str , i : int ):
    pass # Put your code here...
def random_characters(chars_incl : str , i : int ):
    pass # Put your code here...
def is_valid_password(password : str , min_length : int = 8 ):
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
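# Hedged sanity checks for is_valid_password above (min_length defaults to 8):
assert is_valid_password('Hello123!') is True
assert is_valid_password('hello') is False # too short, and missing required classes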
def main():
    max_length = int(input('Please indicate the max length of your password: ' ).strip() )
    chars_incl = input(
        'Please indicate the characters that must be in your password: ' ).strip()
    print('Password generated:' , password_generator(max_length ) )
    print(
        'Alternative Password generated:' , alternative_password_generator(chars_incl , max_length ) , )
    print('[If you are thinking of using this password, You better save it.]' )
if __name__ == "__main__":
main()
| 4 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Tuple = SpeechTaTokenizer
__lowercase: int = False
__lowercase: List[str] = True
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = """this is a test"""
snake_case_ = """this is a test"""
return input_text, output_text
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ )
snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """<pad>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCAmelCase_ ) , 81 )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
snake_case_ = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
| 347 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict=13 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Optional[Any]=24 , _UpperCAmelCase : int=16 , _UpperCAmelCase : Any=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Any=32 , _UpperCAmelCase : str=5 , _UpperCAmelCase : str=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Optional[int]=10 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Union[str, Any]=2 , ):
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = max_length
UpperCAmelCase__ = num_mel_bins
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
UpperCAmelCase__ = frequency_stride
UpperCAmelCase__ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase__ = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase__ = frequency_out_dimension * time_out_dimension
UpperCAmelCase__ = num_patches + 2
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, input_values, labels
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = ASTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {"""input_values""": input_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : Dict = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : Any = False
lowerCAmelCase_ : str = False
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] ):
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = ASTModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["""input_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = ASTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_audio():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_feature_extractor( self ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.default_feature_extractor
UpperCAmelCase__ = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(_UpperCAmelCase )
UpperCAmelCase__ = self.default_feature_extractor
UpperCAmelCase__ , UpperCAmelCase__ = prepare_audio()
UpperCAmelCase__ = audio.squeeze().numpy()
UpperCAmelCase__ = feature_extractor(_UpperCAmelCase , sampling_rate=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase__ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
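        # Hedged follow-up sketch: turning the verified logits into a label name would go
        # through the config's id2label table (a standard transformers attribute), e.g.:
        #   predicted_class_id = outputs.logits.argmax(-1).item()
        #   print(model.config.id2label[predicted_class_id])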
| 355 |
'''simple docstring'''
def nor_gate(input_a : int , input_b : int ):
    '''simple docstring'''
    return int(input_a == input_b == 0 )
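# Hedged check: NOR outputs 1 only when both inputs are 0.
assert nor_gate(0 , 0 ) == 1 and nor_gate(0 , 1 ) == 0 and nor_gate(1 , 1 ) == 0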
def main():
'''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 61 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo.
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path , strict , opset ):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
        onnx_opsets = json.load(f )['''opsets''']
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path , '''rb''' ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + '\n'.join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(f"""Found the following incompatible ops for the opset {opset}:""" )
        print(*incompatible_ops , sep='\n' )
    else:
        print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
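        # Hedged CLI usage sketch (script and model paths are illustrative):
        #   python check_saved_model_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict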
| 249 |
"""simple docstring"""
import math
def __UpperCAmelCase ( initial_intensity , angle ):
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''' )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 3_60:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
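# Hedged numeric check: cos(60 degrees) squared is 0.25, so an initial intensity of
# 100.0 should be attenuated to 25.0 (up to floating-point rounding).
assert abs(__UpperCAmelCase(100.0 , 60.0 ) - 25.0) < 1e-9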
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 249 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
def __init__( self: int , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Union[str, Any]=32 , __lowerCamelCase: List[Any]=2 , __lowerCamelCase: Tuple=3 , __lowerCamelCase: str=16 , __lowerCamelCase: Union[str, Any]=[32, 64, 1_28] , __lowerCamelCase: List[Any]=[1, 2, 1] , __lowerCamelCase: Dict=[2, 2, 4] , __lowerCamelCase: Dict=2 , __lowerCamelCase: Any=2.0 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: str=0.0 , __lowerCamelCase: Any=0.0 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Any=False , __lowerCamelCase: str=True , __lowerCamelCase: int=0.02 , __lowerCamelCase: Optional[Any]=1e-5 , __lowerCamelCase: List[str]=True , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Optional[Any]=10 , __lowerCamelCase: Optional[int]=8 , __lowerCamelCase: Optional[int]=["stage1", "stage2"] , __lowerCamelCase: List[str]=[1, 2] , ) -> List[str]:
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : Any = image_size
__UpperCAmelCase : Optional[int] = patch_size
__UpperCAmelCase : Union[str, Any] = num_channels
__UpperCAmelCase : Dict = embed_dim
__UpperCAmelCase : Tuple = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Tuple = num_heads
__UpperCAmelCase : List[Any] = window_size
__UpperCAmelCase : Tuple = mlp_ratio
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : int = drop_path_rate
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : Dict = use_absolute_embeddings
__UpperCAmelCase : Tuple = patch_norm
__UpperCAmelCase : str = layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : int = is_training
__UpperCAmelCase : Any = scope
__UpperCAmelCase : Any = use_labels
__UpperCAmelCase : Optional[int] = type_sequence_label_size
__UpperCAmelCase : str = encoder_stride
__UpperCAmelCase : Any = out_features
__UpperCAmelCase : Any = out_indices
def _lowerCamelCase ( self: Optional[Any] ) -> Any:
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = None
if self.use_labels:
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : int = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Tuple ) -> Any:
__UpperCAmelCase : Optional[int] = FocalNetModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
__UpperCAmelCase : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowerCamelCase ( self: int , __lowerCamelCase: int , __lowerCamelCase: Tuple , __lowerCamelCase: List[Any] ) -> str:
__UpperCAmelCase : str = FocalNetBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Dict = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Optional[Any] = FocalNetBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: List[Any] ) -> Tuple:
__UpperCAmelCase : int = FocalNetForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase : str = 1
__UpperCAmelCase : Optional[int] = FocalNetForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : str = self.type_sequence_label_size
__UpperCAmelCase : List[str] = FocalNetForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : Any = FocalNetForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : str = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = config_and_inputs
__UpperCAmelCase : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: List[str] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: List[str] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: List[str] = False
lowerCamelCase__: Any = False
lowerCamelCase__: Dict = False
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: List[str] = False
def _lowerCamelCase ( self: Tuple ) -> Dict:
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def _lowerCamelCase ( self: List[str] ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ) -> Any:
return
def _lowerCamelCase ( self: Optional[Any] ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _lowerCamelCase ( self: Optional[Any] ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
def _lowerCamelCase ( self: Any ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def _lowerCamelCase ( self: Any ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[int] ) -> List[str]:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def _lowerCamelCase ( self: Dict ) -> Tuple:
pass
def _lowerCamelCase ( self: int ) -> List[str]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def _lowerCamelCase ( self: Any ) -> List[str]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ) -> Any:
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
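    # The next test pads each spatial dimension up to the next multiple of the patch
    # size before re-checking hidden-state shapes (the formula below adds a full
    # extra patch even when a dimension is already divisible).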
def _lowerCamelCase ( self: Tuple ) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@slow
def _lowerCamelCase ( self: Optional[int] ) -> int:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _lowerCamelCase ( self: str ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: Optional[int] ) -> str:
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: Optional[int] = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase__: Dict = FocalNetConfig
lowerCamelCase__: List[str] = False
def _lowerCamelCase ( self: Optional[int] ) -> List[Any]:
        self.model_tester = FocalNetModelTester(self )
| 342 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( TestCase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
        dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
        dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
        dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
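        # Elasticsearch is fully mocked below (search, index creation, streaming bulk),
        # so this exercises the indexing code path without a live cluster.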
        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename" , es_client=es_client )
            scores , examples = dset.get_nearest_examples("filename" , "my_name-train_29" )
            self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( TestCase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
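        # with METRIC_INNER_PRODUCT the stored identity-matrix rows are one-hot, so a
        # query with a single 1 at position i scores highest against row i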
        query = np.zeros(5 , dtype=np.floataa )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.floataa )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
        index = FaissIndex(string_factory="Flat" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory="LSH" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.floataa )
        query[1] = 1
        scores , indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
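# `mockfs` below is assumed to be a pytest fixture exposing an fsspec filesystem
# under the `mock://` protocol; its storage_options let the FAISS index round-trip
# through that in-memory filesystem instead of local disk.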
@require_faiss
def _UpperCamelCase ( mockfs ) -> Optional[Any]:
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5, dtype=np.floataa ) )
    index_name = "index.faiss"
    path = f'''mock://{index_name}'''
    index.save(path, storage_options=mockfs.storage_options )
    index = FaissIndex.load(path, storage_options=mockfs.storage_options )
    query = np.zeros(5, dtype=np.floataa )
    query[1] = 1
    scores , indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class _snake_case ( TestCase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"] )
            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores , indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
| 342 | 1 |
"""simple docstring"""
from math import factorial, pi
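# Both helpers first reduce theta modulo 2*pi, then evaluate the truncated
# Maclaurin series: sin(x) = sum_{r>=0} (-1)^r * x^(2r+1) / (2r+1)! and
# cos(x) = sum_{r>=0} (-1)^r * x^(2r) / (2r)!, using `accuracy` terms.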
def maclaurin_sin ( theta :float , accuracy :int = 30) -> float:
    if not isinstance(theta , (int, float)):
        raise ValueError('''maclaurin_sin() requires either an int or float for theta''')
    if not isinstance(accuracy , int) or accuracy <= 0:
        raise ValueError('''maclaurin_sin() requires a positive int for accuracy''')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))
def maclaurin_cos ( theta :float , accuracy :int = 30) -> float:
    if not isinstance(theta , (int, float)):
        raise ValueError('''maclaurin_cos() requires either an int or float for theta''')
    if not isinstance(accuracy , int) or accuracy <= 0:
        raise ValueError('''maclaurin_cos() requires a positive int for accuracy''')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15)) | 160 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
A = NewType('''DataClass''', Any)
A = NewType('''DataClassType''', Any)
def string_to_bool ( v ) -> Tuple:
    if isinstance(v , bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""")
def make_choice_type_function ( choices :list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda a_: str_to_choice.get(a_ , a_)
def HfArg ( *,
aliases :Union[str, List[str]] = None , help :str = None , default :Any = dataclasses.MISSING , default_factory :Callable[[], Any] = dataclasses.MISSING , metadata :dict = None , **kwargs , ) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['''aliases'''] = aliases
    if help is not None:
        metadata['''help'''] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs)
class __lowercase ( ArgumentParser ):
    '''simple docstring'''
    dataclass_types: Iterable[DataClassType]
def __init__( self , _UpperCAmelCase , **_UpperCAmelCase ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
__a : str = ArgumentDefaultsHelpFormatter
super().__init__(**_UpperCAmelCase )
if dataclasses.is_dataclass(_UpperCAmelCase ):
__a : int = [dataclass_types]
__a : Optional[Any] = list(_UpperCAmelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_UpperCAmelCase )
@staticmethod
    def _parse_dataclass_field( parser , field ):
        field_name = f"""--{field.name}"""
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )
        aliases = kwargs.pop('''aliases''' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(types , '''UnionType''' ) and isinstance(origin_type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    f""" Problem encountered in field '{field.name}'.""" )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]
            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype ):
        if hasattr(dtype , '''_argument_group_name''' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='''append''' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
            return (*outputs,)
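    # Typical usage of this parser (HfArgumentParser in the original transformers API):
    #   parser = HfArgumentParser((ModelArguments, TrainingArguments))
    #   model_args, training_args = parser.parse_args_into_dataclasses()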
    def parse_dict( self , args , allow_extra_keys = False ):
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}""" )
        return tuple(outputs )
    def parse_json_file( self , json_file , allow_extra_keys = False ):
        with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : int = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
lowerCAmelCase_ = """mvp"""
lowerCAmelCase_ = ["""past_key_values"""]
lowerCAmelCase_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
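    # Beyond the BART-style encoder-decoder fields, MVP adds lightweight prompt-tuning
    # knobs: `use_prompt` enables learned prefix prompts, `prompt_length` sets how many
    # prompt positions are used, and `prompt_mid_dim` sizes the network producing them.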
    def __init__( self , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , )-> Tuple:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                'The config can simply be saved and uploaded again to be fixed.' )
| 251 |
'''simple docstring'''
from itertools import product
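# Project Euler 205: Peter rolls nine 4-sided dice, Colin rolls six 6-sided dice;
# enumerating every outcome with itertools.product gives the exact frequency of
# each total, from which Peter's win probability follows.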
def total_frequency_distribution( sides_number : int , dice_number : int):
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1)
    for dice_numbers in product(face_numbers , repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution( ):
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 251 | 1 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module ( module ) -> List[str]:
    '''simple docstring'''
    if is_torch_version("<" , "2.0.0" ) or not hasattr(torch , "_dynamo" ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
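# Unwraps a model from common distributed/parallel containers (DataParallel, DDP,
# DeepSpeedEngine, torch.compile's OptimizedModule), optionally removing the
# mixed-precision forward wrapper that was patched in.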
def extract_model_from_parallel ( model , keep_fpaa_wrapper = True ) -> Tuple:
    '''simple docstring'''
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model , "forward" )
        original_forward = model.__dict__.pop("_original_forward" , None )
        if original_forward is not None:
            while hasattr(forward , "__wrapped__" ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model , "_converted_to_transformer_engine" , False ):
        convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone ( ) -> List[str]:
'''simple docstring'''
PartialState().wait_for_everyone()
def save ( obj , f ) -> Union[str, Any]:
    '''simple docstring'''
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment ( **kwargs ) -> int:
'''simple docstring'''
for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def get_pretty_name ( obj ) -> List[Any]:
    '''simple docstring'''
    if not hasattr(obj , "__qualname__" ) and not hasattr(obj , "__name__" ):
        obj = getattr(obj , "__class__" , obj )
    if hasattr(obj , "__qualname__" ):
        return obj.__qualname__
    if hasattr(obj , "__name__" ):
        return obj.__name__
    return str(obj )
def merge_dicts ( source , destination ) -> Any:
    '''simple docstring'''
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use ( port = None ) -> bool:
'''simple docstring'''
if port is None:
UpperCamelCase__ = 29500
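    # 29500 is the default rendezvous port used by torch.distributed; connect_ex()
    # returns 0 only when something is already listening on the port.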
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 80 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
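# Precomputes per-example token lengths for the train/val datasets and pickles them
# to each dataset's `len_file`, so later training runs can build length-grouped
# batches without re-tokenizing everything.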
def save_len_file ( model_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ) -> List[Any]:
    '''simple docstring'''
    tok = AutoTokenizer.from_pretrained(model_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 80 | 1 |
from __future__ import annotations
import math
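# Max segment tree with lazy propagation: range assignments are buffered in `lazy`
# (with `flag` marking pending entries) and only pushed down to children when a
# later update/query visits them, keeping both operations at O(log n).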
class SegmentTree :
    def __init__(self , size ):
        '''simple docstring'''
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )] # flag for lazy update
    def left(self , idx ):
        '''simple docstring'''
        return idx * 2
    def right(self , idx ):
        '''simple docstring'''
        return idx * 2 + 1
    def build(self , idx , left_element , right_element , a ):
        '''simple docstring'''
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update(self , idx , left_element , right_element , a , b , val ):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query(self , idx , left_element , right_element , a , b ):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )
    def __str__( self ):
        '''simple docstring'''
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 368 |
from typing import Any
class Node :
    def __init__(self , data ):
        '''simple docstring'''
        self.data = data
        self.next = None
class LinkedList :
    def __init__(self ):
        '''simple docstring'''
        self.head = None
    def print_list(self ):
        '''simple docstring'''
        temp = self.head
        while temp is not None:
            print(temp.data , end=" " )
            temp = temp.next
        print()
    def push(self , new_data ):
        '''simple docstring'''
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node
    def swap_nodes(self , node_data_1 , node_data_2 ):
        '''simple docstring'''
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 189 | 0 |
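# Odd-even transposition sort (brick sort): alternating passes compare-and-swap
# (even, odd) and (odd, even) index pairs; n passes suffice for n elements, giving
# O(n^2) work overall but only O(n) steps on n parallel comparators.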
def odd_even_transposition ( arr ):
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 182 | import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def A ( _lowercase , _lowercase , _lowercase ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , _lowercase )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
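    # A dataset only counts as "small" when its size is known and strictly below the
    # configured in-memory cap; a cap of 0 disables keeping datasets in memory.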
if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
assert result == expected
| 182 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
@dataclass
class DataTrainingArguments :
    train_file: Optional[str] = field(default=None , metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__( self ):
        """simple docstring"""
        if self.train_file is not None:
            extension = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice :
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
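    # Flattens each multiple-choice example (num_choices candidate sequences) into one
    # batch so the tokenizer can pad uniformly, then reshapes everything back to
    # (batch_size, num_choices, seq_len) and re-attaches the labels.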
    def __call__( self , features ):
        """simple docstring"""
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["""input_ids"""] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["""labels"""] = torch.tensor(labels , dtype=torch.intaa )
        return batch
def UpperCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_swag""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowercase = {}
if data_args.train_file is not None:
lowercase = data_args.train_file
if data_args.validation_file is not None:
lowercase = data_args.validation_file
lowercase = data_args.train_file.split(""".""" )[-1]
lowercase = load_dataset(
lowerCAmelCase__ , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowercase = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowercase = [f'ending{i}' for i in range(4 )]
lowercase = """sent1"""
lowercase = """sent2"""
if data_args.max_seq_length is None:
lowercase = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
lowercase = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
lowercase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCAmelCase__ :Optional[Any] ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="""max_length""" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
lowercase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
lowercase = min(len(lowerCAmelCase__ ) , data_args.max_train_samples )
lowercase = train_dataset.select(range(lowerCAmelCase__ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
lowercase = train_dataset.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
lowercase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
lowercase = min(len(lowerCAmelCase__ ) , data_args.max_eval_samples )
lowercase = eval_dataset.select(range(lowerCAmelCase__ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
lowercase = eval_dataset.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowercase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCAmelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCAmelCase__ :Optional[Any] ):
lowercase , lowercase = eval_predictions
lowercase = np.argmax(lowerCAmelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowercase = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , compute_metrics=lowerCAmelCase__ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowercase = train_result.metrics
lowercase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowercase = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
trainer.log_metrics("""train""" , lowerCAmelCase__ )
trainer.save_metrics("""train""" , lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate()
lowercase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowercase = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
trainer.log_metrics("""eval""" , lowerCAmelCase__ )
trainer.save_metrics("""eval""" , lowerCAmelCase__ )
lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Any:
'''simple docstring'''
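    # Entry point for TPU multiprocessing (e.g. via xla_spawn): the spawner passes
    # a process index, which is accepted but unused; each process just runs main().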
main()
if __name__ == "__main__":
main()
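    # Typical invocation (a sketch; the script file name is assumed, the flags are
    # the argument fields referenced above, and paths are placeholders):
    #   python run_swag.py --model_name_or_path bert-base-uncased \
    #       --do_train --do_eval --output_dir /tmp/swag_out --pad_to_max_length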
| 32 | """simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list[list]:
'''simple docstring'''
lowercase = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase__ ):
lowercase = row[0]
for column_index, column in enumerate(lowerCAmelCase__ ):
if magnitude == 0:
lowercase = column
continue
lowercase = column / magnitude
# Subtract to cancel term
lowercase = current_set[0]
lowercase = [first_row]
lowercase = current_set[1::]
for row in current_set:
lowercase = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase__ )
continue
for column_index in range(len(lowerCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase = final_set[0]
lowercase = []
lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase = simplify(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase__ )
lowercase = resultant
return final_set
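# Hand-checked trace of the elimination step this function encodes (illustrative;
# the local names above are mangled, so the trace follows the algorithm rather
# than the literal identifiers): simplify([[1, 2, 3], [4, 5, 6]]) normalizes
# [4, 5, 6] to [1, 1.25, 1.5], subtracts it from [1, 2, 3], and returns
# [[1, 2, 3], [0, 0.75, 1.5]]; rows of length 3 are the base case, so the
# recursion stops there.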
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list:
'''simple docstring'''
if len(lowerCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase = len(lowerCAmelCase__ ) + 1
if any(len(lowerCAmelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCAmelCase__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase = equations.copy()
if any(0 in row for row in data_set ):
lowercase = data_set.copy()
lowercase = []
for row_index, row in enumerate(lowerCAmelCase__ ):
if 0 not in row:
lowercase = data_set.pop(lowerCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCAmelCase__ )
lowercase = data_set.copy()
lowercase = simplify(lowerCAmelCase__ )
lowercase = simplified[::-1]
lowercase = []
for row in simplified:
lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase = row.copy()[: len(lowerCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase__ ) == 0:
solutions.append(0 )
continue
lowercase = temp_row[1::]
lowercase = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase__ )
lowercase = []
for item in solutions:
final.append(float(round(lowerCAmelCase__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
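    # Cross-check (a minimal sketch added for illustration, not part of the
    # original file): the 5x5 system above has hand-checked solution
    # [-1, 0, 1, 2, 3], which numpy reproduces on the same coefficients.
    import numpy as np

    coeffs = np.eye(5) + np.ones((5, 5))  # 2 on the diagonal, 1 elsewhere, as above
    rhs = np.array([4.0, 5.0, 6.0, 7.0, 8.0])
    print(np.linalg.solve(coeffs, rhs))  # -> [-1.  0.  1.  2.  3.]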
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ : int = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase__ : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def UpperCamelCase ( ) -> List[str]:
_UpperCAmelCase : int = cn.convert_to_negative(_lowerCAmelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCamelCase ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Compare the repr of the returned PIL image instead of raw pixel values
assert str(cc.change_contrast(_lowerCAmelCase, 110 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCamelCase ( ) -> Optional[int]:
_UpperCAmelCase : Tuple = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCamelCase ( ) -> Dict:
_UpperCAmelCase : Union[str, Any] = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_UpperCAmelCase : List[Any] = canny.canny(_lowerCAmelCase )
# assert canny array for at least one True
assert canny_array.any()
def UpperCamelCase ( ) -> Tuple:
assert gg.gaussian_filter(_lowerCAmelCase, 5, sigma=0.9 ).all()
def UpperCamelCase ( ) -> str:
# laplace diagonals
_UpperCAmelCase : Tuple = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_UpperCAmelCase : int = conv.img_convolve(_lowerCAmelCase, _lowerCAmelCase ).astype(_lowerCAmelCase )
assert res.any()
def UpperCamelCase ( ) -> Optional[int]:
assert med.median_filter(_lowerCAmelCase, 3 ).any()
def UpperCamelCase ( ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase : Any = sob.sobel_filter(_lowerCAmelCase )
assert grad.any() and theta.any()
def UpperCamelCase ( ) -> Dict:
_UpperCAmelCase : int = sp.make_sepia(_lowerCAmelCase, 20 )
assert sepia.all()
def UpperCamelCase ( _lowerCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" ) -> Union[str, Any]:
_UpperCAmelCase : Union[str, Any] = bs.Burkes(imread(_lowerCAmelCase, 1 ), 120 )
burkes.process()
assert burkes.output_img.any()
def UpperCamelCase ( _lowerCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[Any]:
_UpperCAmelCase : List[str] = rs.NearestNeighbour(imread(_lowerCAmelCase, 1 ), 400, 200 )
nn.process()
assert nn.output.any()
def UpperCamelCase ( ) -> List[str]:
_UpperCAmelCase : List[Any] = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
_UpperCAmelCase : Any = imread(_lowerCAmelCase, 0 )
# Test for get_neighbors_pixel function() return not None
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : int = image[x_coordinate][y_coordinate]
_UpperCAmelCase : Tuple = lbp.get_neighbors_pixel(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_UpperCAmelCase : Tuple = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
_UpperCAmelCase : List[Any] = lbp.local_binary_value(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
assert lbp_image.any()
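# These assertions are meant to be collected by pytest; a typical run (the file
# name and layout are assumed from the import paths above) would be:
#   pytest digital_image_processing/test_digital_image_processing.py -q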
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase):
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : Tuple = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
_UpperCAmelCase : List[str] = dict(zip(_A , range(len(_A ) ) ) )
_UpperCAmelCase : List[Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
_UpperCAmelCase : Dict = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : int = os.path.join(self.tmpdirname , _A )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_A ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_A ) + """\n""" )
# load decoder from hub
_UpperCAmelCase : List[Any] = """hf-internal-testing/ngram-beam-search-decoder"""
def __snake_case ( self , **_A ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : str = self.add_kwargs_tokens_map.copy()
kwargs.update(_A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def __snake_case ( self , **_A ) -> Tuple:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A )
def __snake_case ( self , **_A ) -> Union[str, Any]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
_UpperCAmelCase : List[str] = self.get_decoder()
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that the additional decoder kwargs are applied when loading from pretrained
_UpperCAmelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_A , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : List[Any] = self.get_decoder()
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : List[Any] = floats_list((3, 10_00) )
_UpperCAmelCase : str = feature_extractor(_A , return_tensors="""np""" )
_UpperCAmelCase : int = processor(_A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_decoder()
_UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[Any] = """This is a test string"""
_UpperCAmelCase : Optional[Any] = processor(text=_A )
_UpperCAmelCase : Dict = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self , _A=(2, 10, 16) , _A=77 ) -> Union[str, Any]:
'''simple docstring'''
np.random.seed(_A )
return np.random.rand(*_A )
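    # The default shape (2, 10, 16) is read by the tests below as
    # (batch, time steps, vocab size): the last axis holds per-frame scores over
    # a 16-symbol CTC vocabulary (an illustrative reading, inferred from the
    # decode calls that follow).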
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : str = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_UpperCAmelCase : Optional[int] = processor.decode(_A )
_UpperCAmelCase : str = decoder.decode_beams(_A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def __snake_case ( self , _A ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : str = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Union[str, Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCAmelCase : int = processor.batch_decode(_A )
else:
with get_context(_A ).Pool() as pool:
_UpperCAmelCase : Dict = processor.batch_decode(_A , _A )
_UpperCAmelCase : Tuple = list(_A )
with get_context("""fork""" ).Pool() as p:
_UpperCAmelCase : Tuple = decoder.decode_beams_batch(_A , _A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Tuple = self.get_decoder()
_UpperCAmelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[int] = self._get_dummy_logits()
_UpperCAmelCase : List[str] = 15
_UpperCAmelCase : Dict = -20.0
_UpperCAmelCase : List[str] = -4.0
_UpperCAmelCase : Any = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
_UpperCAmelCase : Any = decoded_processor_out.text
_UpperCAmelCase : Any = list(_A )
with get_context("""fork""" ).Pool() as pool:
_UpperCAmelCase : str = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
_UpperCAmelCase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
_UpperCAmelCase : List[str] = [d[0][2] for d in decoded_decoder_out]
_UpperCAmelCase : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _A , atol=1e-3 ) )
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = self.get_decoder()
_UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[int] = self._get_dummy_logits()
_UpperCAmelCase : Any = 2.0
_UpperCAmelCase : Union[str, Any] = 5.0
_UpperCAmelCase : List[Any] = -20.0
_UpperCAmelCase : str = True
_UpperCAmelCase : Union[str, Any] = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
_UpperCAmelCase : Tuple = decoded_processor_out.text
_UpperCAmelCase : Tuple = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context("""fork""" ).Pool() as pool:
_UpperCAmelCase : Optional[Any] = decoder.decode_beams_batch(
_A , _A , )
_UpperCAmelCase : Optional[int] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _A )
_UpperCAmelCase : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _A )
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase : Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_UpperCAmelCase : Optional[Any] = os.listdir(_A )
_UpperCAmelCase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(_A )
_UpperCAmelCase : Any = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase : Dict = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_UpperCAmelCase : Optional[Any] = os.listdir(_A )
_UpperCAmelCase : Union[str, Any] = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(_A , _A )
def __snake_case ( self ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : List[Any] = floats_list((3, 10_00) )
_UpperCAmelCase : str = processor_wavaveca(_A , return_tensors="""np""" )
_UpperCAmelCase : Any = processor_auto(_A , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_UpperCAmelCase : Union[str, Any] = self._get_dummy_logits()
_UpperCAmelCase : Dict = processor_wavaveca.batch_decode(_A )
_UpperCAmelCase : Optional[Any] = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.get_feature_extractor()
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = self.get_decoder()
_UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def __snake_case ( _A , _A ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [d[key] for d in offsets]
return retrieved_list
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : List[Any] = self._get_dummy_logits()[0]
_UpperCAmelCase : Tuple = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Optional[Any] = self._get_dummy_logits()
_UpperCAmelCase : List[Any] = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_A , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __snake_case ( self ) -> str:
'''simple docstring'''
import torch
_UpperCAmelCase : List[str] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_A )
_UpperCAmelCase : List[Any] = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
_UpperCAmelCase : List[Any] = iter(_A )
_UpperCAmelCase : Optional[Any] = next(_A )
_UpperCAmelCase : Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
_UpperCAmelCase : Dict = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_UpperCAmelCase : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(_A ).logits.cpu().numpy()
_UpperCAmelCase : Union[str, Any] = processor.decode(logits[0] , output_word_offsets=_A )
_UpperCAmelCase : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_UpperCAmelCase : Optional[int] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
_UpperCAmelCase : List[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_A , """word""" ) ) , _A )
self.assertEqual(""" """.join(self.get_from_offsets(_A , """word""" ) ) , output.text )
# output times
_UpperCAmelCase : List[str] = torch.tensor(self.get_from_offsets(_A , """start_time""" ) )
_UpperCAmelCase : Any = torch.tensor(self.get_from_offsets(_A , """end_time""" ) )
# fmt: off
_UpperCAmelCase : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_UpperCAmelCase : List[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
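# The integration test above is gated by @slow/@require_torch/@require_torchaudio;
# in the transformers test suite such tests only run when explicitly opted in, e.g.
#   RUN_SLOW=1 pytest <path to this test file> -k word_time_stamp
# (the exact path and -k filter are illustrative).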
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmelCase: str = logging.get_logger(__name__)
@dataclass
class a__( lowerCamelCase__ ):
lowercase__ = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : int , **__snake_case : Dict ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
a : int = deprecated_arg[3:]
a : Union[str, Any] = not kwargs.pop(__snake_case )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
a : Optional[Any] = kwargs.pop('tpu_name' , self.tpu_name )
a : Tuple = kwargs.pop('device_idx' , self.device_idx )
a : Tuple = kwargs.pop('eager_mode' , self.eager_mode )
a : str = kwargs.pop('use_xla' , self.use_xla )
super().__init__(**__snake_case )
lowercase__ = field(
default=lowerCamelCase__ , metadata={"""help""": """Name of TPU"""} , )
lowercase__ = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
lowercase__ = field(default=lowerCamelCase__ , metadata={"""help""": """Benchmark models in eager mode."""} )
lowercase__ = field(
default=lowerCamelCase__ , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def lowercase_ ( self : Dict ):
requires_backends(self , ['tf'] )
a : Optional[Any] = None
if self.tpu:
try:
if self.tpu_name:
a : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
a : Tuple = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
a : Any = None
return tpu
@cached_property
def lowercase_ ( self : Optional[int] ):
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
a : int = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
a : List[str] = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
a : str = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
def lowercase_ ( self : Dict ):
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def lowercase_ ( self : Tuple ):
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def lowercase_ ( self : Union[str, Any] ):
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def lowercase_ ( self : int ):
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def lowercase_ ( self : Any ):
return self.n_gpu > 0
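# Usage sketch (illustrative; class and property names above are mangled, so the
# real-library analogues are assumed): construct the TF benchmark arguments with
# eager_mode=True / use_xla=False, then read the strategy property to resolve a
# tf.distribute OneDeviceStrategy (CPU/GPU) or TPUStrategy as set up above.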
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase: Any = logging.get_logger(__name__)
class a__( lowerCamelCase__ ):
lowercase__ = ["""pixel_values"""]
def __init__( self : List[str] , __snake_case : bool = True , __snake_case : int = 32 , __snake_case : Union[str, Any]=PILImageResampling.BILINEAR , __snake_case : bool = True , **__snake_case : List[Any] , ):
a : Optional[Any] = do_resize
a : Union[str, Any] = do_rescale
a : Union[str, Any] = size_divisor
a : List[Any] = resample
super().__init__(**__snake_case )
def lowercase_ ( self : Optional[Any] , __snake_case : np.ndarray , __snake_case : int , __snake_case : Tuple , __snake_case : Optional[ChannelDimension] = None , **__snake_case : Tuple ):
a , a : Optional[int] = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a : int = height // size_divisor * size_divisor
a : int = width // size_divisor * size_divisor
a : Any = resize(__snake_case , (new_h, new_w) , resample=__snake_case , data_format=__snake_case , **__snake_case )
return image
def lowercase_ ( self : Optional[int] , __snake_case : np.ndarray , __snake_case : float , __snake_case : Optional[ChannelDimension] = None , **__snake_case : Optional[Any] ):
return rescale(image=__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def lowercase_ ( self : List[str] , __snake_case : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __snake_case : Optional[bool] = None , __snake_case : Optional[int] = None , __snake_case : Dict=None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[TensorType, str]] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : Any , ):
a : List[str] = do_resize if do_resize is not None else self.do_resize
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : Optional[Any] = size_divisor if size_divisor is not None else self.size_divisor
a : Union[str, Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a : Tuple = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a : str = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a : int = [self.resize(__snake_case , size_divisor=__snake_case , resample=__snake_case ) for image in images]
if do_rescale:
a : List[str] = [self.rescale(__snake_case , scale=1 / 2_55 ) for image in images]
a : Any = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
a : Any = {'pixel_values': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
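# Usage sketch (illustrative; the class name is mangled above, shapes hand-checked
# against the resize rule): with size_divisor=32, a 65x33 RGB image is rounded
# down to 64x32, so preprocess(..., return_tensors="np") yields pixel_values of
# shape (1, 3, 32, 64) in the default channel-first layout.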
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> Tuple:
"""simple docstring"""
a__ : List[str] = jnp.ones((batch_size, length) ) / length
return scores
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Tuple = None
a__ : str = 2_0
a__ : Tuple = self._get_uniform_logits(batch_size=2 , length=__SCREAMING_SNAKE_CASE )
# tweak scores to not be uniform anymore
a__ : List[str] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
a__ : Any = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
a__ : str = jax.nn.softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
a__ : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
a__ : List[Any] = FlaxTemperatureLogitsWarper(temperature=1.3 )
a__ : Tuple = jax.nn.softmax(temp_dist_warper_sharper(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE ) , axis=-1 )
a__ : Optional[Any] = jax.nn.softmax(temp_dist_warper_smoother(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Optional[Any] = None
a__ : Dict = 1_0
a__ : List[Any] = 2
# create ramp distribution
a__ : Any = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy()
a__ : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
a__ : int = FlaxTopKLogitsWarper(3 )
a__ : Any = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
a__ : int = 5
a__ : Union[str, Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
a__ : List[Any] = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy()
a__ : List[str] = top_k_warp_safety_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Optional[int] = None
a__ : Any = 1_0
a__ : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
a__ : Any = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
a__ : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
a__ : Optional[Any] = np.exp(top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
a__ : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# check edge cases with negative and extreme logits
a__ : str = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
a__ : Optional[int] = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
a__ : List[str] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
a__ : Union[str, Any] = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : int = 2_0
a__ : List[Any] = 4
a__ : List[Any] = 0
a__ : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=__SCREAMING_SNAKE_CASE )
# check that min length is applied at length 5
a__ : Any = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
a__ : List[Any] = 5
a__ : Any = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a__ : List[str] = min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
a__ : Optional[int] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a__ : Any = 1_5
a__ : Any = min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Union[str, Any] = 2_0
a__ : List[Any] = 4
a__ : Optional[Any] = 0
a__ : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the bos_token_id score
a__ : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=2_0 )
a__ : List[Any] = 1
a__ : Optional[int] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a__ : Dict = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
a__ : List[str] = 3
a__ : Dict = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a__ : int = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() )
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : Optional[int] = 2_0
a__ : Union[str, Any] = 4
a__ : str = 0
a__ : Tuple = 5
a__ : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the eos_token_id when max_length is reached
a__ : List[str] = ids_tensor((batch_size, 4) , vocab_size=2_0 )
a__ : Optional[Any] = 4
a__ : List[Any] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a__ : Any = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
a__ : List[str] = 3
a__ : str = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a__ : int = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : Optional[Any] = 4
a__ : int = 1_0
a__ : Dict = 1_5
a__ : List[Any] = 2
a__ : int = 1
a__ : Optional[int] = 1_5
# dummy input_ids and scores
a__ : Optional[int] = ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE )
a__ : Tuple = input_ids.copy()
a__ : Union[str, Any] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a__ : Any = scores.copy()
# instantiate all dist processors
a__ : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
a__ : List[str] = FlaxTopKLogitsWarper(3 )
a__ : Union[str, Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
a__ : Tuple = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=__SCREAMING_SNAKE_CASE )
a__ : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE )
a__ : int = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
a__ : List[str] = 1_0
# no processor list
a__ : Dict = temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
a__ : Optional[int] = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
a__ : List[Any] = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
a__ : Optional[Any] = min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
a__ : Tuple = bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
a__ : Tuple = eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# with processor list
a__ : Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
a__ : Tuple = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
a__ : Optional[Any] = 4
a__ : Union[str, Any] = 1_0
a__ : Union[str, Any] = 1_5
a__ : Any = 2
a__ : Optional[Any] = 1
a__ : Tuple = 1_5
# dummy input_ids and scores
a__ : int = ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE )
a__ : Union[str, Any] = input_ids.copy()
a__ : Optional[int] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a__ : List[str] = scores.copy()
# instantiate all dist processors
a__ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
a__ : Optional[int] = FlaxTopKLogitsWarper(3 )
a__ : List[str] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
a__ : int = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=__SCREAMING_SNAKE_CASE )
a__ : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE )
a__ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
a__ : Optional[Any] = 1_0
# no processor list
def run_no_processor_list(__lowercase , __lowercase , __lowercase ):
a__ : List[Any] = temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
a__ : Dict = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
a__ : Optional[Any] = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
a__ : Dict = min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
a__ : List[str] = bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
a__ : List[str] = eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
return scores
# with processor list
def run_processor_list(__lowercase , __lowercase , __lowercase ):
a__ : List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
a__ : Union[str, Any] = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
return scores
a__ : Dict = jax.jit(__SCREAMING_SNAKE_CASE )
a__ : Dict = jax.jit(__SCREAMING_SNAKE_CASE )
a__ : List[Any] = jitted_run_no_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a__ : Union[str, Any] = jitted_run_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
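    # Note: the jitted/non-jitted comparison above only holds because every warper
    # and processor used here is written with pure jnp ops and value-independent
    # control flow, so jax.jit tracing reproduces the eager computation exactly.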
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__snake_case :List[Any] = logging.getLogger(__name__)
class _A :
def __init__( self : List[str]):
'''simple docstring'''
__a = False
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
if not self.initialized:
__a = RagRetriever(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , )
__a = True
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.retriever.index.init_index()
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a , __a = self.retriever._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return doc_ids, retrieved_doc_embeds
class _A ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None):
'''simple docstring'''
if index is not None and index.is_initialized() and len(__SCREAMING_SNAKE_CASE) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''')
super().__init__(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , )
__a = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for worker in self.retrieval_workers
])
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
logger.info('''initializing retrieval''')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
__a = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
__a , __a = ray.get(random_worker.retrieve.remote(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
else:
__a , __a = self._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return super(__SCREAMING_SNAKE_CASE , cls).get_tokenizers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE) or RagConfig.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE)
__a = rag_tokenizer.question_encoder
__a = rag_tokenizer.generator
if indexed_dataset is not None:
__a = '''custom'''
__a = CustomHFIndex(config.retrieval_vector_size , __SCREAMING_SNAKE_CASE)
else:
__a = cls._build_index(__SCREAMING_SNAKE_CASE)
return cls(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , retrieval_workers=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , )
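# Usage sketch (illustrative; identifiers above are mangled, so real-library names
# are assumed): after ray.init(), create a few retrieval actors from the worker
# class at the top of this file via ray.remote(...).remote(), then call the
# classmethod above, e.g. from_pretrained("facebook/rag-token-nq", actor_handles),
# to get a distributed retriever whose retrieve() forwards each query to a
# randomly chosen actor.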
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=[30, 30] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=10 , )->List[Any]:
'''simple docstring'''
A_ : List[str] = parent
A_ : Union[str, Any] = batch_size
A_ : Optional[int] = image_size
A_ : List[str] = patch_size
A_ : str = num_channels
A_ : Optional[int] = is_training
A_ : str = use_labels
A_ : Union[str, Any] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Any = num_attention_heads
A_ : int = intermediate_size
A_ : str = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : str = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[str] = num_labels
A_ : List[str] = scope
A_ : int = n_targets
A_ : List[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A_ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A_ : Union[str, Any] = num_patches + 1 + self.num_detection_tokens
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A_ : Dict = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A_ : int = []
for i in range(self.batch_size ):
A_ : List[Any] = {}
A_ : str = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_SCREAMING_SNAKE_CASE )
A_ : Dict = torch.rand(self.n_targets , 4 , device=_SCREAMING_SNAKE_CASE )
labels.append(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _snake_case ( self )->Dict:
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
A_ : Dict = YolosModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
A_ : Dict = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
    def create_and_check_for_object_detection ( self , config , pixel_values , labels )->Union[str, Any]:
        '''simple docstring'''
        model = YolosForObjectDetection(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values=pixel_values )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        result = model(pixel_values=pixel_values , labels=labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
    def prepare_config_and_inputs_for_common ( self )->Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False )->Any:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size ):
                    target = {}
                    target['''class_labels'''] = torch.ones(
                        size=(self.model_tester.n_targets,) , device=torch_device , dtype=torch.long )
                    target['''boxes'''] = torch.ones(
                        self.model_tester.n_targets , 4 , device=torch_device , dtype=torch.float )
                    labels.append(target )
                inputs_dict['''labels'''] = labels
        return inputs_dict
    def setUp ( self )->int:
'''simple docstring'''
        self.model_tester = YolosModelTester(self )
        self.config_tester = ConfigTester(self , config_class=YolosConfig , has_text_modality=False , hidden_size=37 )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self )->Tuple:
'''simple docstring'''
pass
def _snake_case ( self )->Dict:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _snake_case ( self )->Dict:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _snake_case ( self )->List[str]:
'''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs )
@slow
def _snake_case ( self )->str:
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor ( self )->List[Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def _snake_case ( self )->int:
'''simple docstring'''
        model = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
# forward pass
with torch.no_grad():
            outputs = model(inputs.pixel_values )
# verify outputs
        expected_shape = torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=torch_device , )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice_logits , atol=1e-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 ) )
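        # Shape note: hustvl/yolos-small decodes 100 detection tokens over 91 COCO
        # classes plus one "no object" class, which is why the logits checked above
        # come out as (1, 100, 92).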
# verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(torch_device )
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(torch_device )
        self.assertEqual(len(results['''scores'''] ) , 5 )
        self.assertTrue(torch.allclose(results['''scores'''] , expected_scores , atol=1e-4 ) )
        self.assertSequenceEqual(results['''labels'''].tolist() , expected_labels )
        self.assertTrue(torch.allclose(results['''boxes'''][0, :] , expected_slice_boxes ) )
| 366 |
from collections import deque
from .hash_table import HashTable
class _lowerCamelCase ( HashTable ):
"""simple docstring"""
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
    def _set_value ( self , key , data )->List[Any]:
        '''simple docstring'''
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor ( self )->List[Any]:
'''simple docstring'''
return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
    def _collision_resolution ( self , key , data=None )->Any:
        '''simple docstring'''
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
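# Behavior sketch (illustrative): each slot in `self.values` holds a deque, so up to
# `charge_factor` colliding values chain in the same slot; only once a slot is full
# AND no empty slot remains does the parent HashTable's collision resolution kick in.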
| 65 | 0 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def __lowerCamelCase ( method ) -> List[str]:
    """simple docstring"""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
        return method
    def wrapper(self, *args, **kwargs ):
        if hasattr(self, """_hf_hook""" ) and hasattr(self._hf_hook, """pre_forward""" ):
            self._hf_hook.pre_forward(self )
        return method(self, *args, **kwargs )
    return wrapper
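# Usage sketch (illustrative; `apply_forward_hook` is a hypothetical name for the
# masked decorator above):
#
#   class MyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           ...
#
# On accelerate >= 0.17.0 this runs any attached `_hf_hook.pre_forward` (e.g. from
# CPU offloading) before the decorated method executes; otherwise it is a no-op.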
| 134 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )  # use_linear_projection=True assumed; the original value was masked
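        # in_channels=9 reflects the inpainting UNet input: 4 noisy-latent channels +
        # 4 masked-image latent channels + 1 downsampled binary mask channel.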
        scheduler = PNDMScheduler(skip_prk_steps=True )  # skip_prk_steps=True assumed; the original value was masked
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs ( self , device , seed=0 ) -> List[str]:
        '''simple docstring'''
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uinta(image ) ).convert("""RGB""" ).resize((64, 64) )
        mask_image = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : Optional[Any] =np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown ( self ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench.npy""" )
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.floataa , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.floataa , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type="""np""" , )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 134 | 1 |
'''simple docstring'''
from __future__ import annotations
def __a ( nums ) ->float:
    """simple docstring"""
    if not nums:
        raise ValueError("""List is empty""" )
    return sum(nums ) / len(nums )
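# e.g. for [3, 6, 9] the function returns 18 / 3 == 6.0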
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 |
'''simple docstring'''
from __future__ import annotations
def encode ( plain ) ->list[int]:
    """simple docstring"""
    return [ord(elem ) - 96 for elem in plain]
def decode ( encoded ) ->str:
    """simple docstring"""
    return "".join(chr(elem + 96 ) for elem in encoded )
def main ( ) ->None:
    """simple docstring"""
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )
if __name__ == "__main__":
main()
| 337 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : torch.FloatTensor
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int = 3_2 ,SCREAMING_SNAKE_CASE__ : int = 6_4 ,SCREAMING_SNAKE_CASE__ : int = 2_0 ,SCREAMING_SNAKE_CASE__ : int = 7_6_8 ,SCREAMING_SNAKE_CASE__ : Any=7_7 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,SCREAMING_SNAKE_CASE__ : float = 0.0 ,SCREAMING_SNAKE_CASE__ : str = "silu" ,SCREAMING_SNAKE_CASE__ : Optional[str] = None ,SCREAMING_SNAKE_CASE__ : Optional[str] = None ,SCREAMING_SNAKE_CASE__ : Optional[str] = "linear" ,SCREAMING_SNAKE_CASE__ : Optional[str] = "prd" ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,):
super().__init__()
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : Union[str, Any] = attention_head_dim
__lowerCamelCase : Tuple = num_attention_heads * attention_head_dim
__lowerCamelCase : List[Any] = additional_embeddings
__lowerCamelCase : List[str] = time_embed_dim or inner_dim
__lowerCamelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
__lowerCamelCase : Optional[int] = clip_embed_dim or embedding_dim
__lowerCamelCase : Tuple = Timesteps(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,0)
__lowerCamelCase : int = TimestepEmbedding(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,out_dim=SCREAMING_SNAKE_CASE__ ,act_fn=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
if embedding_proj_norm_type is None:
__lowerCamelCase : List[Any] = None
elif embedding_proj_norm_type == "layer":
__lowerCamelCase : Any = nn.LayerNorm(SCREAMING_SNAKE_CASE__)
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
__lowerCamelCase : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
if encoder_hid_proj_type is None:
__lowerCamelCase : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
__lowerCamelCase : Any = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
__lowerCamelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,SCREAMING_SNAKE_CASE__))
if added_emb_type == "prd":
__lowerCamelCase : int = nn.Parameter(torch.zeros(1 ,1 ,SCREAMING_SNAKE_CASE__))
elif added_emb_type is None:
__lowerCamelCase : Dict = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")
__lowerCamelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dropout=SCREAMING_SNAKE_CASE__ ,activation_fn='gelu' ,attention_bias=SCREAMING_SNAKE_CASE__ ,)
for d in range(SCREAMING_SNAKE_CASE__)
])
if norm_in_type == "layer":
__lowerCamelCase : List[str] = nn.LayerNorm(SCREAMING_SNAKE_CASE__)
elif norm_in_type is None:
__lowerCamelCase : Tuple = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}.")
__lowerCamelCase : Any = nn.LayerNorm(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-10000.0)
causal_attention_mask.triu_(1)
__lowerCamelCase : Union[str, Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,SCREAMING_SNAKE_CASE__ ,persistent=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = nn.Parameter(torch.zeros(1 ,SCREAMING_SNAKE_CASE__))
__lowerCamelCase : str = nn.Parameter(torch.zeros(1 ,SCREAMING_SNAKE_CASE__))
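    # Note on the buffer registered above: the causal mask is (1, N, N) with -10000.0
    # strictly above the diagonal (N = num_embeddings + additional_embeddings), so each
    # position can only attend to itself and earlier positions.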
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors ( self : Union[str, Any]):
        processors = {}
        def fn_recursive_add_processors(name : str ,module : torch.nn.Module ,processors : Dict[str, AttentionProcessor]):
if hasattr(SCREAMING_SNAKE_CASE__ ,'set_processor'):
__lowerCamelCase : Union[str, Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
return processors
    def set_attn_processor ( self : Any ,processor : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor ,dict) and len(processor) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(SCREAMING_SNAKE_CASE__)} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes.")
        def fn_recursive_attn_processor(name : str ,module : torch.nn.Module ,processor : Tuple):
if hasattr(SCREAMING_SNAKE_CASE__ ,'set_processor'):
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
module.set_processor(SCREAMING_SNAKE_CASE__)
else:
module.set_processor(processor.pop(F"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
for name, module in self.named_children():
fn_recursive_attn_processor(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    def set_default_attn_processor ( self : str):
self.set_attn_processor(AttnProcessor())
    def forward ( self : List[Any] ,hidden_states : Optional[int] ,timestep : Union[torch.Tensor, float, int] ,proj_embedding : torch.FloatTensor ,encoder_hidden_states : Optional[torch.FloatTensor] = None ,attention_mask : Optional[torch.BoolTensor] = None ,return_dict : bool = True ,):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size ,dtype=timesteps.dtype ,device=timesteps.device)
__lowerCamelCase : Optional[int] = self.time_proj(SCREAMING_SNAKE_CASE__)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__lowerCamelCase : str = timesteps_projected.to(dtype=self.dtype)
__lowerCamelCase : Dict = self.time_embedding(SCREAMING_SNAKE_CASE__)
if self.embedding_proj_norm is not None:
__lowerCamelCase : Optional[Any] = self.embedding_proj_norm(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = self.embedding_proj(SCREAMING_SNAKE_CASE__)
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__lowerCamelCase : Optional[Any] = self.encoder_hidden_states_proj(SCREAMING_SNAKE_CASE__)
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set')
__lowerCamelCase : Optional[Any] = self.proj_in(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = self.positional_embedding.to(hidden_states.dtype)
__lowerCamelCase : Tuple = []
__lowerCamelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(SCREAMING_SNAKE_CASE__)
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape) == 2:
__lowerCamelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape) == 2:
__lowerCamelCase : int = hidden_states[:, None, :]
__lowerCamelCase : List[str] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__lowerCamelCase : Any = self.prd_embedding.to(hidden_states.dtype).expand(SCREAMING_SNAKE_CASE__ ,-1 ,-1)
additional_embeds.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = torch.cat(
SCREAMING_SNAKE_CASE__ ,dim=1 ,)
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
__lowerCamelCase : Any = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__lowerCamelCase : Tuple = F.pad(
SCREAMING_SNAKE_CASE__ ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
__lowerCamelCase : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
__lowerCamelCase : Dict = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
__lowerCamelCase : int = F.pad(SCREAMING_SNAKE_CASE__ ,(0, self.additional_embeddings) ,value=0.0)
__lowerCamelCase : List[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
__lowerCamelCase : List[str] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0)
if self.norm_in is not None:
__lowerCamelCase : Union[str, Any] = self.norm_in(SCREAMING_SNAKE_CASE__)
for block in self.transformer_blocks:
__lowerCamelCase : List[Any] = block(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = self.norm_out(SCREAMING_SNAKE_CASE__)
if self.prd_embedding is not None:
__lowerCamelCase : Dict = hidden_states[:, -1]
else:
__lowerCamelCase : Union[str, Any] = hidden_states[:, additional_embeddings_len:]
__lowerCamelCase : int = self.proj_to_clip_embeddings(SCREAMING_SNAKE_CASE__)
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=SCREAMING_SNAKE_CASE__)
    def post_process_latents ( self ,prior_latents : int):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 73 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( UpperCamelCase__):
"""simple docstring"""
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("""eta""", 0.0), ("""num_inference_steps""", 50))
    def get_scheduler_config ( self: int , **__lowerCamelCase: Dict ):
'''simple docstring'''
UpperCamelCase__: Any = {
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCamelCase )
return config
    def full_loop ( self: int , **__lowerCamelCase: Optional[int] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**__lowerCamelCase )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
UpperCamelCase__: Tuple = self.scheduler_classes[0]
UpperCamelCase__: Optional[int] = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase__: str = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
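        # For reference (standard DDIM variance, hedged): the values asserted above
        # follow var(t, t_prev) = (1 - abar_prev) / (1 - abar_t) * (1 - abar_t / abar_prev),
        # where abar_* are the cumulative alpha products; eta later scales its square root.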
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.scheduler_classes[0]
UpperCamelCase__: Union[str, Any] = self.get_scheduler_config()
UpperCamelCase__: Any = scheduler_class(**__lowerCamelCase )
UpperCamelCase__ , UpperCamelCase__: Union[str, Any] = 10, 0.0
scheduler.set_timesteps(__lowerCamelCase )
UpperCamelCase__: Tuple = self.dummy_model()
UpperCamelCase__: Union[str, Any] = self.dummy_sample_deter
UpperCamelCase__: Dict = self.dummy_sample_deter + 0.1
UpperCamelCase__: Dict = self.dummy_sample_deter - 0.1
UpperCamelCase__: int = samplea.shape[0]
UpperCamelCase__: List[str] = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase__: Union[str, Any] = torch.arange(__lowerCamelCase )[0:3, None].repeat(1 , __lowerCamelCase )
UpperCamelCase__: str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase__: Optional[int] = scheduler.batch_step_no_noise(__lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __lowerCamelCase )
UpperCamelCase__: Dict = torch.sum(torch.abs(__lowerCamelCase ) )
UpperCamelCase__: Tuple = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
        sample = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
        sample = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 149 | 0 |
'''simple docstring'''
# fmt: off
__UpperCAmelCase :List[str] = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__UpperCAmelCase :Any = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt ( message : str ):
    '''simple docstring'''
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt ( message : str ):
    '''simple docstring'''
    return "".join(REVERSE_DICT[char] for char in message.split() )
def main ( ):
    '''simple docstring'''
    message = '''Morse code here!'''
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
if __name__ == "__main__":
main() | 240 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config ( mixed_precision="no" , save_location : str = default_json_config_file , use_xpu : bool = False ):
    '''simple docstring'''
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
    if path.exists():
        print(
            F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
    config = {
        '''compute_environment''': '''LOCAL_MACHINE''',
        '''mixed_precision''': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['''num_processes'''] = num_gpus
        config['''use_cpu'''] = False
        if num_gpus > 1:
            config['''distributed_type'''] = '''MULTI_GPU'''
        else:
            config['''distributed_type'''] = '''NO'''
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['''num_processes'''] = num_xpus
        config['''use_cpu'''] = False
        if num_xpus > 1:
            config['''distributed_type'''] = '''MULTI_XPU'''
        else:
            config['''distributed_type'''] = '''NO'''
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['''num_processes'''] = num_npus
        config['''use_cpu'''] = False
        if num_npus > 1:
            config['''distributed_type'''] = '''MULTI_NPU'''
        else:
            config['''distributed_type'''] = '''NO'''
    else:
        num_gpus = 0
        config['''use_cpu'''] = True
        config['''num_processes'''] = 1
        config['''distributed_type'''] = '''NO'''
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
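# Illustrative call (assuming this block corresponds to accelerate's
# `write_basic_config`): write_basic_config(mixed_precision="fp16") writes a JSON
# cluster config at `default_json_config_file` and returns its path.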
def default_command_parser ( parser , parents ):
    '''simple docstring'''
    parser = parser.add_parser('''default''' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '''--config_file''' , default=default_json_config_file , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , dest='''save_location''' , )
    parser.add_argument(
        '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=str , help='''Whether or not to use mixed precision training. '''
        '''Choose between FP16 and BF16 (bfloat16) training. '''
        '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command ( args ):
    '''simple docstring'''
    config_file = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' ) | 240 | 1 |
'''simple docstring'''
import numpy as np
def _SCREAMING_SNAKE_CASE (vector , alpha ) -> np.ndarray:
    """simple docstring"""
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
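# This is the ELU activation: e.g. with alpha == 1.0 it maps [-1.0, 0.0, 2.0] to
# [exp(-1) - 1 (about -0.632), 0.0, 2.0]; positives pass through unchanged while
# negatives saturate toward -alpha.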
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata ( class_info_file , repo_path="shi-labs/oneformer_demo" ):
    with open(hf_hub_download(repo_path , class_info_file , repo_type="dataset" ) , "r" ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
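# Resulting layout (sketch): a dict keyed by the class ids in the JSON file, e.g.
# {"0": "wall", ..., "thing_ids": [...], "class_names": ["wall", ...]}.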
class OneFormerImageProcessorTester ( unittest.TestCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str]=7 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : Tuple=30 , SCREAMING_SNAKE_CASE : Dict=4_00 , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[int]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE : List[str]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE : Optional[Any]=10 , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : int=2_55 , SCREAMING_SNAKE_CASE : str="shi-labs/oneformer_demo" , SCREAMING_SNAKE_CASE : List[Any]="ade20k_panoptic.json" , SCREAMING_SNAKE_CASE : Tuple=10 , ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path
# for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values ( self , image_inputs , batched=False ):
'''simple docstring'''
if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
    def get_fake_oneformer_outputs ( self ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __a ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp ( self ):
'''simple docstring'''
        self.image_processing_tester = OneFormerImageProcessorTester(self )
@property
    def image_processor_dict ( self ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def __lowercase ( self : str ):
'''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "image_mean" ) )
        self.assertTrue(hasattr(image_processor , "image_std" ) )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_resize" ) )
        self.assertTrue(hasattr(image_processor , "size" ) )
        self.assertTrue(hasattr(image_processor , "ignore_index" ) )
        self.assertTrue(hasattr(image_processor , "class_info_file" ) )
        self.assertTrue(hasattr(image_processor , "num_text" ) )
        self.assertTrue(hasattr(image_processor , "repo_path" ) )
        self.assertTrue(hasattr(image_processor , "metadata" ) )
        self.assertTrue(hasattr(image_processor , "do_reduce_labels" ) )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase__ : Optional[int] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ : Dict = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase__ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase__ : List[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCamelCase__ , UpperCamelCase__ : Tuple = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ : List[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs ( self , with_segmentation_maps=False , is_instance_map=False , segmentation_type="np" ):
'''simple docstring'''
UpperCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCamelCase__ : Any = self.image_processing_tester.num_labels
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE )
if with_segmentation_maps:
UpperCamelCase__ : Tuple = num_labels
if is_instance_map:
UpperCamelCase__ : List[str] = list(range(SCREAMING_SNAKE_CASE ) ) * 2
UpperCamelCase__ : Optional[Any] = dict(enumerate(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCamelCase__ : List[str] = [Image.fromarray(SCREAMING_SNAKE_CASE ) for annotation in annotations]
UpperCamelCase__ : Optional[int] = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , return_tensors="pt" , instance_id_to_semantic_id=SCREAMING_SNAKE_CASE , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE , )
return inputs
def __lowercase ( self : int ):
'''simple docstring'''
pass
def __lowercase ( self : str ):
'''simple docstring'''
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
UpperCamelCase__ : str = inputs["mask_labels"]
UpperCamelCase__ : Optional[Any] = inputs["class_labels"]
UpperCamelCase__ : List[str] = inputs["pixel_values"]
UpperCamelCase__ : int = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
            # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=SCREAMING_SNAKE_CASE )
common(is_instance_map=SCREAMING_SNAKE_CASE , segmentation_type="pil" )
common(is_instance_map=SCREAMING_SNAKE_CASE , segmentation_type="pil" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = np.zeros((20, 50) )
UpperCamelCase__ : int = 1
UpperCamelCase__ : Dict = 1
UpperCamelCase__ : Dict = 1
UpperCamelCase__ : int = binary_mask_to_rle(SCREAMING_SNAKE_CASE )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
UpperCamelCase__ : int = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCamelCase__ : Union[str, Any] = image_processor.post_process_semantic_segmentation(SCREAMING_SNAKE_CASE )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCamelCase__ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(SCREAMING_SNAKE_CASE , target_sizes=SCREAMING_SNAKE_CASE )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
UpperCamelCase__ : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCamelCase__ : Optional[int] = image_processor.post_process_instance_segmentation(SCREAMING_SNAKE_CASE , threshold=0 )
self.assertTrue(len(SCREAMING_SNAKE_CASE ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
UpperCamelCase__ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCamelCase__ : Tuple = image_processor.post_process_panoptic_segmentation(SCREAMING_SNAKE_CASE , threshold=0 )
self.assertTrue(len(SCREAMING_SNAKE_CASE ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) | 189 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands, it doesn't print a newline after the results
import re
import subprocess
import sys
a : Any = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
a : Optional[Any] = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
a : List[Any] = '|'.join(sys.argv[1:])
a : int = re.compile(rF'''^({joined_dirs}).*?\.py$''')
a : int = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
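The filtering step above is easy to exercise without git or a Makefile. The following standalone sketch builds the identical regex and applies it to a made-up diff listing (the file names are illustrative only):

import re

# Hypothetical output of `git diff --name-only <fork_point_sha>`:
modified = [
    "src/transformers/models/bert/modeling_bert.py",
    "tests/test_modeling_bert.py",
    "docs/source/index.mdx",
    "utils/check_repo.py",
]
joined_dirs = "|".join(["utils", "src", "tests", "examples"])
# Same construction as above: a .py file under one of the wanted top-level dirs.
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
print(" ".join(x for x in modified if regex.match(x)), end="")
# -> src/transformers/models/bert/modeling_bert.py tests/test_modeling_bert.py utils/check_repo.py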
| 82 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=64, ) -> Union[str, Any]:
UpperCAmelCase_: int = parent
UpperCAmelCase_: Tuple = batch_size
UpperCAmelCase_: int = is_training
UpperCAmelCase_: Any = use_auxiliary_loss
UpperCAmelCase_: str = num_queries
UpperCAmelCase_: List[Any] = num_channels
UpperCAmelCase_: Union[str, Any] = min_size
UpperCAmelCase_: Optional[Any] = max_size
UpperCAmelCase_: Tuple = num_labels
UpperCAmelCase_: Union[str, Any] = hidden_dim
UpperCAmelCase_: int = hidden_dim
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = torch.ones([self.batch_size, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
UpperCAmelCase_: Optional[int] = (torch.rand((self.batch_size, self.num_labels), device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
UpperCAmelCase_: Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __snake_case (self ) -> Any:
UpperCAmelCase_: Any = MaskaFormerConfig(
hidden_size=self.hidden_dim, )
UpperCAmelCase_: Any = self.num_queries
UpperCAmelCase_: Dict = self.num_labels
UpperCAmelCase_: Dict = [1, 1, 1, 1]
UpperCAmelCase_: int = self.num_channels
UpperCAmelCase_: Union[str, Any] = 64
UpperCAmelCase_: List[Any] = 128
UpperCAmelCase_: Optional[Any] = self.hidden_dim
UpperCAmelCase_: str = self.hidden_dim
UpperCAmelCase_: List[str] = self.hidden_dim
return config
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = self.prepare_config_and_inputs()
UpperCAmelCase_: Any = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCAmelCase_: Union[str, Any] = output.encoder_hidden_states
UpperCAmelCase_: int = output.pixel_decoder_hidden_states
UpperCAmelCase_: Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), config.decoder_layers )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
with torch.no_grad():
UpperCAmelCase_: Dict = MaskaFormerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = model(SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Tuple = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_: Dict = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = model(SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = model(
pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
A = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
A = False
A = False
A = False
A = False
def __snake_case (self ) -> Any:
UpperCAmelCase_: List[str] = MaskaFormerModelTester(self )
UpperCAmelCase_: Any = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[Any]:
self.config_tester.run_common_tests()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def __snake_case (self ) -> Dict:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def __snake_case (self ) -> Optional[int]:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def __snake_case (self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case (self ) -> Dict:
pass
def __snake_case (self ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_: Tuple = [*signature.parameters.keys()]
UpperCAmelCase_: str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> List[Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase_: Any = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: str = (self.model_tester.min_size,) * 2
UpperCAmelCase_: str = {
"""pixel_values""": torch.randn((2, 3, *size), device=SCREAMING_SNAKE_CASE_ ),
"""mask_labels""": torch.randn((2, 10, *size), device=SCREAMING_SNAKE_CASE_ ),
"""class_labels""": torch.zeros(2, 10, device=SCREAMING_SNAKE_CASE_ ).long(),
}
UpperCAmelCase_: Dict = self.model_tester.get_config()
UpperCAmelCase_: Optional[Any] = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: List[Any] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = model(**SCREAMING_SNAKE_CASE_, output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.attentions is not None )
def __snake_case (self ) -> Optional[int]:
if not self.model_tester.is_training:
return
UpperCAmelCase_: Union[str, Any] = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCAmelCase_: Optional[int] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_: Union[str, Any] = True
UpperCAmelCase_: str = True
UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_: Union[str, Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase_: Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_: Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a : int = 1E-4
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _a ( unittest.TestCase ):
@cached_property
def __snake_case (self ) -> Optional[int]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __snake_case (self ) -> Dict:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = self.default_image_processor
UpperCAmelCase_: Optional[Any] = prepare_img()
UpperCAmelCase_: str = image_processor(SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_: Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Dict = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: str = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase_: Tuple = self.default_image_processor
UpperCAmelCase_: Dict = prepare_img()
UpperCAmelCase_: Any = image_processor(SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_: int = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
UpperCAmelCase_: int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase_: Optional[Any] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCAmelCase_: int = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
UpperCAmelCase_: Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_: Any = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase_: Dict = self.default_image_processor
UpperCAmelCase_: str = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )], return_tensors="""pt""", )
UpperCAmelCase_: int = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""mask_labels"""]]
UpperCAmelCase_: int = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCAmelCase_: Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
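The retain_grad pattern in the last test is worth isolating: PyTorch only keeps .grad on leaf tensors, so gradients of intermediate activations must be requested explicitly before backward(). A minimal sketch, independent of Mask2Former:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
x = torch.randn(3, 4)
hidden = model[0](x)       # intermediate (non-leaf) activation
hidden.retain_grad()       # keep its .grad after the backward pass
out = model[2](model[1](hidden))
out.sum().backward()
assert hidden.grad is not None  # the gradient reached the intermediate tensor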
| 82 | 1 |
import math
import sys
def lowerCAmelCase_ ( __UpperCAmelCase: int ) -> int:
if number != int(__UpperCAmelCase ):
raise ValueError('''the value of input must be a natural number''' )
if number < 0:
raise ValueError('''the value of input must not be a negative number''' )
if number == 0:
return 1
UpperCamelCase__ : Any = [-1] * (number + 1)
UpperCamelCase__ : Any = 0
for i in range(1 , number + 1 ):
UpperCamelCase__ : Tuple = sys.maxsize
UpperCamelCase__ : Optional[int] = int(math.sqrt(__UpperCAmelCase ) )
for j in range(1 , root + 1 ):
UpperCamelCase__ : Tuple = 1 + answers[i - (j**2)]
UpperCamelCase__ : List[Any] = min(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase__ : str = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
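A quick cross-check of the recurrence used above, answers[i] = 1 + min over j of answers[i - j*j], together with the fact, guaranteed by Lagrange's four-square theorem, that no entry ever exceeds 4. This is an independent re-derivation for illustration, not the function above:

import math

def min_squares(n):
    best = [0] + [n] * n
    for i in range(1, n + 1):
        for j in range(1, math.isqrt(i) + 1):
            best[i] = min(best[i], 1 + best[i - j * j])
    return best[n]

assert [min_squares(n) for n in range(1, 9)] == [1, 2, 3, 1, 2, 3, 4, 2]
assert all(min_squares(n) <= 4 for n in range(1, 100))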
| 201 |
from math import factorial
def lowerCAmelCase_ ( __UpperCAmelCase: int , __UpperCAmelCase: int ) -> int:
    # If either of the conditions is true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(__UpperCAmelCase ) // (factorial(__UpperCAmelCase ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
F'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
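Since Python 3.8 the same binomial coefficient is available as math.comb, which avoids computing three large factorials. A minimal sanity check against the printed figures (restating the function so the snippet is self-contained):

import math
from math import factorial

def combinations(n, k):
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))

assert combinations(52, 5) == math.comb(52, 5) == 2_598_960
assert combinations(40, 4) == math.comb(40, 4) == 91_390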
| 201 | 1 |
def _a ( SCREAMING_SNAKE_CASE__ : int = 50_00_00_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = set()
SCREAMING_SNAKE_CASE__ : Dict = int((limit - 24) ** (1 / 2) )
SCREAMING_SNAKE_CASE__ : Any = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , SCREAMING_SNAKE_CASE__ ) ) )
for primea in primes:
SCREAMING_SNAKE_CASE__ : Optional[int] = primea * primea
for primea in primes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
SCREAMING_SNAKE_CASE__ : List[str] = primea * primea * primea * primea
SCREAMING_SNAKE_CASE__ : Optional[int] = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE__ )
return len(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
print(f"{solution() = }")
| 191 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "AutoTokenizer"
UpperCAmelCase_ = ["tokenizer"]
UpperCAmelCase_ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self : Union[str, Any], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Union[str, Any]=None ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = speaker_embeddings
@classmethod
def A_ ( cls : Any, _UpperCAmelCase : List[str], _UpperCAmelCase : Dict="speaker_embeddings_path.json", **_UpperCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
SCREAMING_SNAKE_CASE__ : Any = get_file_from_repo(
_UpperCAmelCase, _UpperCAmelCase, subfolder=kwargs.pop("subfolder", _UpperCAmelCase ), cache_dir=kwargs.pop("cache_dir", _UpperCAmelCase ), force_download=kwargs.pop("force_download", _UpperCAmelCase ), proxies=kwargs.pop("proxies", _UpperCAmelCase ), resume_download=kwargs.pop("resume_download", _UpperCAmelCase ), local_files_only=kwargs.pop("local_files_only", _UpperCAmelCase ), use_auth_token=kwargs.pop("use_auth_token", _UpperCAmelCase ), revision=kwargs.pop("revision", _UpperCAmelCase ), )
if speaker_embeddings_path is None:
                logger.warning(
                    F'''`{os.path.join(_UpperCAmelCase, _UpperCAmelCase )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
SCREAMING_SNAKE_CASE__ : Dict = None
else:
with open(_UpperCAmelCase ) as speaker_embeddings_json:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = json.load(_UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
return cls(tokenizer=_UpperCAmelCase, speaker_embeddings=_UpperCAmelCase )
def A_ ( self : str, _UpperCAmelCase : Optional[int], _UpperCAmelCase : List[str]="speaker_embeddings_path.json", _UpperCAmelCase : Optional[Any]="speaker_embeddings", _UpperCAmelCase : bool = False, **_UpperCAmelCase : List[str], ) -> Union[str, Any]:
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_UpperCAmelCase, _UpperCAmelCase, "v2" ), exist_ok=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
SCREAMING_SNAKE_CASE__ : Any = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
SCREAMING_SNAKE_CASE__ : List[Any] = self._load_voice_preset(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"], _UpperCAmelCase, F'''{prompt_key}_{key}''' ), voice_preset[key], allow_pickle=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(_UpperCAmelCase, F'''{prompt_key}_{key}.npy''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = tmp_dict
with open(os.path.join(_UpperCAmelCase, _UpperCAmelCase ), "w" ) as fp:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
super().save_pretrained(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : List[Any], _UpperCAmelCase : str = None, **_UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.speaker_embeddings[voice_preset]
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
SCREAMING_SNAKE_CASE__ : List[Any] = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path", "/" ), voice_preset_paths[key], subfolder=kwargs.pop("subfolder", _UpperCAmelCase ), cache_dir=kwargs.pop("cache_dir", _UpperCAmelCase ), force_download=kwargs.pop("force_download", _UpperCAmelCase ), proxies=kwargs.pop("proxies", _UpperCAmelCase ), resume_download=kwargs.pop("resume_download", _UpperCAmelCase ), local_files_only=kwargs.pop("local_files_only", _UpperCAmelCase ), use_auth_token=kwargs.pop("use_auth_token", _UpperCAmelCase ), revision=kwargs.pop("revision", _UpperCAmelCase ), )
if path is None:
            raise ValueError(
                F'''`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/" ), voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' )
SCREAMING_SNAKE_CASE__ : int = np.load(_UpperCAmelCase )
return voice_preset_dict
def A_ ( self : int, _UpperCAmelCase : Optional[dict] = None ) -> Optional[int]:
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key], np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self : List[Any], _UpperCAmelCase : Optional[Any]=None, _UpperCAmelCase : Union[str, Any]=None, _UpperCAmelCase : Optional[int]="pt", _UpperCAmelCase : List[str]=2_5_6, _UpperCAmelCase : int=False, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : Any=False, **_UpperCAmelCase : List[str], ) -> List[Any]:
"""simple docstring"""
if voice_preset is not None and not isinstance(_UpperCAmelCase, _UpperCAmelCase ):
if (
isinstance(_UpperCAmelCase, _UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
SCREAMING_SNAKE_CASE__ : List[str] = self._load_voice_preset(_UpperCAmelCase )
else:
if isinstance(_UpperCAmelCase, _UpperCAmelCase ) and not voice_preset.endswith(".npz" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = voice_preset + ".npz"
SCREAMING_SNAKE_CASE__ : List[str] = np.load(_UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer(
_UpperCAmelCase, return_tensors=_UpperCAmelCase, padding="max_length", max_length=_UpperCAmelCase, return_attention_mask=_UpperCAmelCase, return_token_type_ids=_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, **_UpperCAmelCase, )
if voice_preset is not None:
SCREAMING_SNAKE_CASE__ : str = voice_preset
return encoded_text
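The preset_shape table above is what _validate_voice_preset_dict enforces: one array per prompt key with a fixed rank. A hedged sketch of a dummy voice preset that would satisfy those rank checks; the array sizes here are placeholders, not real Bark dimensions:

import numpy as np

dummy_voice_preset = {
    "semantic_prompt": np.zeros(16, dtype=np.int64),     # expected ndim == 1
    "coarse_prompt": np.zeros((2, 16), dtype=np.int64),  # expected ndim == 2
    "fine_prompt": np.zeros((8, 16), dtype=np.int64),    # expected ndim == 2
}
expected_ranks = {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}
for key, ndim in expected_ranks.items():
    assert dummy_voice_preset[key].ndim == ndim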
| 191 | 1 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
SCREAMING_SNAKE_CASE__ : Dict = 'base_with_context'
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_SCREAMING_SNAKE_CASE )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase : List[str] = weights[f'''layers_{lyr_num}''']
lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase : Dict = ly_weight["attention"]
lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]:
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowerCamelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_SCREAMING_SNAKE_CASE )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase : Optional[Any] = weights[f'''layers_{lyr_num}''']
lowerCamelCase : Union[str, Any] = ly_weight["attention"]
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCamelCase : int = weights[f'''layers_{lyr_num}''']
lowerCamelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowerCamelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase : Optional[Any] = ly_weight["self_attention"]
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : str = ly_weight["MultiHeadDotProductAttention_0"]
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase : str = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def A ( _SCREAMING_SNAKE_CASE ) -> int:
lowerCamelCase : str = checkpoints.load_tax_checkpoint(args.checkpoint_path )
lowerCamelCase : Union[str, Any] = jnp.tree_util.tree_map(onp.array ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
lowerCamelCase : Tuple = os.path.join(args.checkpoint_path ,".." ,"config.gin" )
lowerCamelCase : Optional[Any] = inference.parse_training_gin_file(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = inference.InferenceModel(args.checkpoint_path ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ,variance_type="fixed_large" )
lowerCamelCase : Dict = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] ,vocab_size=synth_model.model.module.config.vocab_size ,d_model=synth_model.model.module.config.emb_dim ,dropout_rate=synth_model.model.module.config.dropout_rate ,num_layers=synth_model.model.module.config.num_encoder_layers ,num_heads=synth_model.model.module.config.num_heads ,d_kv=synth_model.model.module.config.head_dim ,d_ff=synth_model.model.module.config.mlp_dim ,feed_forward_proj="gated-gelu" ,)
lowerCamelCase : List[str] = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims ,targets_context_length=synth_model.sequence_length["targets_context"] ,d_model=synth_model.model.module.config.emb_dim ,dropout_rate=synth_model.model.module.config.dropout_rate ,num_layers=synth_model.model.module.config.num_encoder_layers ,num_heads=synth_model.model.module.config.num_heads ,d_kv=synth_model.model.module.config.head_dim ,d_ff=synth_model.model.module.config.mlp_dim ,feed_forward_proj="gated-gelu" ,)
lowerCamelCase : Optional[int] = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims ,targets_length=synth_model.sequence_length["targets_context"] ,max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time ,d_model=synth_model.model.module.config.emb_dim ,num_layers=synth_model.model.module.config.num_decoder_layers ,num_heads=synth_model.model.module.config.num_heads ,d_kv=synth_model.model.module.config.head_dim ,d_ff=synth_model.model.module.config.mlp_dim ,dropout_rate=synth_model.model.module.config.dropout_rate ,)
lowerCamelCase : List[str] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = load_decoder(ta_checkpoint["target"]["decoder"] ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
lowerCamelCase : List[Any] = SpectrogramDiffusionPipeline(
notes_encoder=_SCREAMING_SNAKE_CASE ,continuous_encoder=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE ,scheduler=_SCREAMING_SNAKE_CASE ,melgan=_SCREAMING_SNAKE_CASE ,)
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
main(args)
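Every `.T` in the loaders above comes from one convention mismatch: Flax/T5X dense kernels are stored as (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features). A minimal sketch of the round-trip:

import numpy as np
import torch
import torch.nn as nn

in_features, out_features = 4, 3
flax_kernel = np.random.randn(in_features, out_features).astype(np.float32)

linear = nn.Linear(in_features, out_features, bias=False)
with torch.no_grad():
    # (out, in) <- transpose of the (in, out) Flax kernel, as in the script above.
    linear.weight.copy_(torch.from_numpy(flax_kernel.T))

x = np.random.randn(2, in_features).astype(np.float32)
flax_out = x @ flax_kernel  # Flax convention: y = x @ kernel
torch_out = linear(torch.from_numpy(x)).detach().numpy()  # torch: y = x @ weight.T
assert np.allclose(flax_out, torch_out, atol=1e-6)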
| 48 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_UpperCamelCase = random.Random()
def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ):
if rng is None:
__lowerCAmelCase : Any = global_rng
__lowerCAmelCase : str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __lowercase (unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Dict = batch_size
__lowerCAmelCase : str = min_seq_length
__lowerCAmelCase : int = max_seq_length
__lowerCAmelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCAmelCase : Any = padding_value
__lowerCAmelCase : str = sampling_rate
__lowerCAmelCase : Optional[Any] = return_attention_mask
__lowerCAmelCase : Optional[Any] = do_normalize
__lowerCAmelCase : Optional[Any] = feature_size
__lowerCAmelCase : Optional[int] = chunk_length
__lowerCAmelCase : Optional[Any] = hop_length
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase__ ( self , A_=False , A_=False ) ->Optional[Any]:
'''simple docstring'''
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__lowerCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowerCAmelCase : Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCAmelCase : Optional[Any] = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = WhisperFeatureExtractor if is_speech_available() else None
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = WhisperFeatureExtractionTester(self )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : List[str] = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__lowerCAmelCase : int = self.feature_extraction_class.from_pretrained(A_ )
__lowerCAmelCase : Dict = feat_extract_first.to_dict()
__lowerCAmelCase : Union[str, Any] = feat_extract_second.to_dict()
__lowerCAmelCase : Union[str, Any] = feat_extract_first.mel_filters
__lowerCAmelCase : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(A_ )
__lowerCAmelCase : List[str] = self.feature_extraction_class.from_json_file(A_ )
__lowerCAmelCase : List[str] = feat_extract_first.to_dict()
__lowerCAmelCase : Tuple = feat_extract_second.to_dict()
__lowerCAmelCase : Any = feat_extract_first.mel_filters
__lowerCAmelCase : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__lowerCAmelCase : Tuple = feature_extractor(A_ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__lowerCAmelCase : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
__lowerCAmelCase : Union[str, Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowerCAmelCase : Optional[int] = np.asarray(A_ )
__lowerCAmelCase : Dict = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test truncation required
__lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs]
__lowerCAmelCase : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
__lowerCAmelCase : Optional[int] = [np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[str] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
import torch
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : List[Any] = np.random.rand(100 , 32 ).astype(np.floataa )
__lowerCAmelCase : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCAmelCase : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowerCAmelCase : int = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCamelCase__ ( self , A_ ) ->str:
'''simple docstring'''
__lowerCAmelCase : Any = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__lowerCAmelCase : Union[str, Any] = ds.sort('''id''' ).select(range(A_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
__lowerCAmelCase : int = self._load_datasamples(1 )
__lowerCAmelCase : Any = WhisperFeatureExtractor()
__lowerCAmelCase : Optional[Any] = feature_extractor(A_ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : str = self._load_datasamples(1 )[0]
__lowerCAmelCase : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
__lowerCAmelCase : Union[str, Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
| 275 | 0 |
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Union[str, Any]:
"""simple docstring"""
snake_case_ = 0
snake_case_ = len(SCREAMING_SNAKE_CASE ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
snake_case_ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE ):
return None
snake_case_ = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
snake_case_ = left
snake_case_ = point
elif point > right:
snake_case_ = right
snake_case_ = point
else:
if item < current_item:
snake_case_ = point - 1
else:
snake_case_ = point + 1
return None
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Any:
"""simple docstring"""
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
snake_case_ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif point > right:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , point - 1 )
else:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , point + 1 , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Union[str, Any]:
"""simple docstring"""
if collection != sorted(SCREAMING_SNAKE_CASE ):
        raise ValueError('''Collection must be sorted in ascending order''' )
return True
if __name__ == "__main__":
import sys
UpperCAmelCase = 0
if debug == 1:
UpperCAmelCase = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
UpperCAmelCase = 67
UpperCAmelCase = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at positions: {result}''')
else:
print("""Not found""") | 267 |
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> list[int]:
"""simple docstring"""
snake_case_ = int(SCREAMING_SNAKE_CASE )
# Initialize Result
snake_case_ = []
# Traverse through all denomination
for denomination in reversed(SCREAMING_SNAKE_CASE ):
# Find denominations
while int(SCREAMING_SNAKE_CASE ) >= int(SCREAMING_SNAKE_CASE ):
total_value -= int(SCREAMING_SNAKE_CASE )
            answer.append(SCREAMING_SNAKE_CASE )  # append the denomination to the answer list
return answer
# Driver Code
if __name__ == "__main__":
UpperCAmelCase = []
UpperCAmelCase = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCAmelCase = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
UpperCAmelCase = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
UpperCAmelCase = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(f'''Following is minimal change for {value}: ''')
UpperCAmelCase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """) | 267 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Dict =["image_processor", "tokenizer"]
UpperCAmelCase_ : Optional[Any] ="ChineseCLIPImageProcessor"
UpperCAmelCase_ : Tuple =("BertTokenizer", "BertTokenizerFast")
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase , )
__snake_case : Dict = kwargs.pop("feature_extractor" )
__snake_case : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase , UpperCAmelCase )
__snake_case : List[str] = self.image_processor
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__snake_case : Tuple = self.tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if images is not None:
__snake_case : Tuple = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None and images is not None:
__snake_case : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : Optional[int] = self.tokenizer.model_input_names
__snake_case : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , )
return self.image_processor_class
| 326 |
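# A minimal usage sketch for the processor pattern above, using the upstream
# `transformers` ChineseCLIPProcessor (the checkpoint name is an assumption;
# any Chinese-CLIP checkpoint that ships a processor config should behave the
# same way). Text is routed to the tokenizer, the image to the image
# processor, and the merged encoding carries both modalities.
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # includes 'input_ids' and 'pixel_values'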
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCAmelCase__( lowercase : Dict , lowercase : bool = True , lowercase : float = math.inf , lowercase : float = -math.inf , lowercase : float = math.inf , lowercase : float = -math.inf , lowercase : bool = False , lowercase : float = 100 , lowercase : float = 0.0_1 , lowercase : float = 1 , ) -> Any:
__snake_case : Optional[Any] = False
__snake_case : Optional[Any] = search_prob
__snake_case : str = start_temperate
__snake_case : List[Any] = []
__snake_case : str = 0
__snake_case : Dict = None
while not search_end:
__snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
__snake_case : List[Any] = current_state
scores.append(lowercase )
iterations += 1
__snake_case : Dict = None
__snake_case : str = current_state.get_neighbors()
while (
next_state is None and neighbors
):  # loop until we find a neighbor we can move to, or run out of neighbors
__snake_case : Any = random.randint(0 , len(lowercase ) - 1 ) # picking a random neighbor
__snake_case : int = neighbors.pop(lowercase )
__snake_case : Optional[Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__snake_case : Any = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__snake_case : List[str] = picked_neighbor
else:
__snake_case : Optional[Any] = (math.e) ** (
change / current_temp
)  # Metropolis acceptance probability: e^(change / temperature)
if random.random() < probability:  # accept the worse move with this probability
__snake_case : str = picked_neighbor
__snake_case : Optional[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__snake_case : Optional[Any] = True
else:
__snake_case : str = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowercase ) , lowercase )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCAmelCase__( lowercase : List[str] , lowercase : Tuple ) -> str:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
_UpperCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_UpperCamelCase = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F'''and 50 > y > -5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
_UpperCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_UpperCamelCase = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F'''and 50 > y > -5 found via simulated annealing: {local_min.score()}'''
)
def lowerCAmelCase__( lowercase : Any , lowercase : Union[str, Any] ) -> Any:
return (3 * x**2) - (6 * y)
_UpperCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_UpperCamelCase = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'''{local_min.score()}'''
)
_UpperCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_UpperCamelCase = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'''{local_min.score()}'''
)
| 326 | 1 |
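# A worked example of the Metropolis acceptance rule used above: after the
# `find_max` sign flip, a worsening move has change < 0 and is accepted with
# probability e^(change / temperature), so hotter temperatures accept worse
# moves more readily, which is what lets the search escape local optima.
import math

change = -5.0  # candidate neighbor is worse by 5
for temperature in (100.0, 10.0, 1.0):
    probability = math.e ** (change / temperature)
    print(f"T={temperature:>5}: accept worse move with p={probability:.4f}")
# T=100.0: accept worse move with p=0.9512
# T= 10.0: accept worse move with p=0.6065
# T=  1.0: accept worse move with p=0.0067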
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Dict =ShapEPipeline
a : Tuple =["""prompt"""]
a : Optional[Any] =["""prompt"""]
a : List[Any] =[
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
a : List[str] =False
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 8
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=self.text_embedder_hidden_size,projection_dim=self.text_embedder_hidden_size,intermediate_size=37,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=10_00,)
return CLIPTextModelWithProjection(__SCREAMING_SNAKE_CASE )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowerCAmelCase = PriorTransformer(**__SCREAMING_SNAKE_CASE )
return model
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase = ShapERenderer(**__SCREAMING_SNAKE_CASE )
return model
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.dummy_prior
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_renderer
__lowerCAmelCase = HeunDiscreteScheduler(
beta_schedule="""exp""",num_train_timesteps=10_24,prediction_type="""sample""",use_karras_sigmas=__SCREAMING_SNAKE_CASE,clip_sample=__SCREAMING_SNAKE_CASE,clip_sample_range=1.0,)
__lowerCAmelCase = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = torch_device == """cpu"""
__lowerCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2,test_max_difference=__SCREAMING_SNAKE_CASE,relax_max_difference=__SCREAMING_SNAKE_CASE,)
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = 1
__lowerCAmelCase = 2
__lowerCAmelCase = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase = batch_size * [inputs[key]]
__lowerCAmelCase = pipe(**__SCREAMING_SNAKE_CASE,num_images_per_prompt=__SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
__lowerCAmelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase = pipe(
"""a shark""",generator=__SCREAMING_SNAKE_CASE,guidance_scale=15.0,num_inference_steps=64,frame_size=64,output_type="""np""",).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
| 46 |
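# A brief restatement of the device-aware seeding pattern in
# `get_dummy_inputs` above: MPS does not support device-specific
# torch.Generator objects, so seeding falls back to the global default
# generator there (a sketch of the idiom, assuming only a working torch
# install).
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

generator = make_generator("cpu", seed=0)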
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def _lowerCAmelCase ( lowercase ) -> List[str]:
__lowerCAmelCase = np.max(lowercase , axis=-1 , keepdims=lowercase )
__lowerCAmelCase = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase )
class _UpperCAmelCase ( lowerCAmelCase_ ):
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = {}
if "second_text" in kwargs:
__lowerCAmelCase = kwargs["""second_text"""]
return preprocess_kwargs, {}, {}
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
return self.tokenizer(__SCREAMING_SNAKE_CASE,text_pair=__SCREAMING_SNAKE_CASE,return_tensors=self.framework )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.model(**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = model_outputs.logits[0].numpy()
__lowerCAmelCase = softmax(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.argmax(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.model.config.idalabel[best_class]
__lowerCAmelCase = probabilities[best_class].item()
__lowerCAmelCase = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 46 | 1 |
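# A self-contained check of the numerically stable softmax defined above:
# subtracting the row-wise max before exponentiating is mathematically a
# no-op but prevents overflow for large logits (the mangled assignments in
# the original make it non-runnable, so the helper is restated here).
import numpy as np

def stable_softmax(outputs: np.ndarray) -> np.ndarray:
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

logits = np.array([[1000.0, 1001.0, 1002.0]])  # naive np.exp would overflow here
print(stable_softmax(logits))  # ~[[0.0900 0.2447 0.6652]]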
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a__ = logging.get_logger(__name__)
@dataclass
class snake_case :
'''simple docstring'''
snake_case_ : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
snake_case_ : str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
snake_case_ : int = field(
default=1_28 ,metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} ,)
snake_case_ : bool = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def UpperCamelCase_ ( self : int) -> Any:
"""simple docstring"""
_snake_case : Union[str, Any] = self.task_name.lower()
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Tuple = """train"""
snake_case_ : List[Any] = """dev"""
snake_case_ : Union[str, Any] = """test"""
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : GlueDataTrainingArguments
snake_case_ : str
snake_case_ : List[InputFeatures]
def __init__( self : str , lowerCAmelCase : GlueDataTrainingArguments , lowerCAmelCase : PreTrainedTokenizerBase , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Union[str, Split] = Split.train , lowerCAmelCase : Optional[str] = None , ) -> int:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , lowerCAmelCase , )
_snake_case : List[Any] = args
_snake_case : int = glue_processors[args.task_name]()
_snake_case : List[Any] = glue_output_modes[args.task_name]
if isinstance(lowerCAmelCase , lowerCAmelCase):
try:
_snake_case : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""")
# Load data features from cache or dataset file
_snake_case : Dict = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
_snake_case : Union[str, Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_snake_case , _snake_case : List[Any] = label_list[2], label_list[1]
_snake_case : Union[str, Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_snake_case : Optional[Any] = cached_features_file + """.lock"""
with FileLock(lowerCAmelCase):
if os.path.exists(lowerCAmelCase) and not args.overwrite_cache:
_snake_case : Optional[Any] = time.time()
_snake_case : List[str] = torch.load(lowerCAmelCase)
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start)
else:
logger.info(F'''Creating features from dataset file at {args.data_dir}''')
if mode == Split.dev:
_snake_case : Optional[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
_snake_case : int = self.processor.get_test_examples(args.data_dir)
else:
_snake_case : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
_snake_case : Optional[int] = examples[:limit_length]
_snake_case : Any = glue_convert_examples_to_features(
lowerCAmelCase , lowerCAmelCase , max_length=args.max_seq_length , label_list=lowerCAmelCase , output_mode=self.output_mode , )
_snake_case : Dict = time.time()
torch.save(self.features , lowerCAmelCase)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''')
def __len__( self : Tuple) -> str:
"""simple docstring"""
return len(self.features)
def __getitem__( self : Tuple , lowerCAmelCase : Tuple) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def UpperCamelCase_ ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.label_list
| 317 |
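# A minimal sketch of the lock-then-cache idiom used in the dataset
# constructor above, assuming only the `filelock` package: the first process
# to take the lock builds and saves the features, later processes load the
# cached copy instead of recomputing.
import os

import torch
from filelock import FileLock

def load_or_build(cache_path: str, build_fn):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        artifact = build_fn()
        torch.save(artifact, cache_path)
        return artifact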
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = """efficientnet"""
def __init__( self : List[Any] , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 600 , lowerCAmelCase : float = 2.0 , lowerCAmelCase : float = 3.1 , lowerCAmelCase : int = 8 , lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , lowerCAmelCase : List[int] = [] , lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCAmelCase : float = 0.25 , lowerCAmelCase : str = "swish" , lowerCAmelCase : int = 2560 , lowerCAmelCase : str = "mean" , lowerCAmelCase : float = 0.02 , lowerCAmelCase : float = 0.001 , lowerCAmelCase : float = 0.99 , lowerCAmelCase : float = 0.5 , lowerCAmelCase : float = 0.2 , **lowerCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
_snake_case : Optional[int] = num_channels
_snake_case : str = image_size
_snake_case : Tuple = width_coefficient
_snake_case : List[str] = depth_coefficient
_snake_case : List[Any] = depth_divisor
_snake_case : str = kernel_sizes
_snake_case : Any = in_channels
_snake_case : Optional[Any] = out_channels
_snake_case : str = depthwise_padding
_snake_case : Tuple = strides
_snake_case : Dict = num_block_repeats
_snake_case : int = expand_ratios
_snake_case : Tuple = squeeze_expansion_ratio
_snake_case : Optional[int] = hidden_act
_snake_case : Optional[int] = hidden_dim
_snake_case : Tuple = pooling_type
_snake_case : Tuple = initializer_range
_snake_case : List[Any] = batch_norm_eps
_snake_case : Optional[Any] = batch_norm_momentum
_snake_case : str = dropout_rate
_snake_case : Union[str, Any] = drop_connect_rate
_snake_case : Optional[int] = sum(lowerCAmelCase) * 4
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Tuple = version.parse("""1.11""" )
@property
def UpperCamelCase_ ( self : Optional[Any]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def UpperCamelCase_ ( self : Union[str, Any]) -> float:
"""simple docstring"""
return 1E-5
| 317 | 1 |
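# A worked example of how EfficientNet's compound-scaling coefficients are
# typically applied (a sketch of the standard rounding rules from the
# reference implementation, not code taken from the config class above):
# channel counts are scaled by `width_coefficient` and snapped to a multiple
# of `depth_divisor`, block repeat counts are scaled by `depth_coefficient`
# and rounded up.
import math

def round_filters(filters: int, width_coefficient: float, depth_divisor: int = 8) -> int:
    filters *= width_coefficient
    new_filters = max(depth_divisor, int(filters + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_filters < 0.9 * filters:  # never round down by more than 10%
        new_filters += depth_divisor
    return int(new_filters)

def round_repeats(repeats: int, depth_coefficient: float) -> int:
    return int(math.ceil(depth_coefficient * repeats))

# With the b7-style defaults above (width 2.0, depth 3.1):
print(round_filters(32, 2.0))   # 64
print(round_repeats(3, 3.1))    # 10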
'''simple docstring'''
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
_lowercase : Tuple = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''facebook/nllb-200-distilled-600M'''
lowerCAmelCase_ = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
'''which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
lowerCAmelCase_ = '''translator'''
lowerCAmelCase_ = AutoTokenizer
lowerCAmelCase_ = AutoModelForSeqaSeqLM
lowerCAmelCase_ = LANGUAGE_CODES
lowerCAmelCase_ = ['''text''', '''text''', '''text''']
lowerCAmelCase_ = ['''text''']
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
lowercase_ : List[str] = self.lang_to_code[src_lang]
lowercase_ : Dict = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.model.generate(**__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__SCREAMING_SNAKE_CASE )
| 369 |
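# A minimal restatement of the language-code validation the tool performs in
# its encode step. The full dictionary above is bound to a mangled name in
# this file, so two of its entries are inlined here for illustration.
LANGUAGE_CODES = {"English": "eng_Latn", "French": "fra_Latn"}

def to_nllb_codes(src_lang: str, tgt_lang: str):
    for lang in (src_lang, tgt_lang):
        if lang not in LANGUAGE_CODES:
            raise ValueError(f"{lang} is not a supported language.")
    return LANGUAGE_CODES[src_lang], LANGUAGE_CODES[tgt_lang]

print(to_nllb_codes("English", "French"))  # ('eng_Latn', 'fra_Latn')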
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase : Any = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCamelCase_ ):
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 264 | 0 |
"""simple docstring"""
def _snake_case ( _snake_case : int ) -> bool:
'''simple docstring'''
if num < 0:
return False
_A = num
_A = 0
while num > 0:
_A = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 |
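# A runnable restatement of the digit-reversal palindrome check above (the
# original's variable names are mangled, so a corrected local copy is given
# here), with a few sanity cases:
def is_palindrome_number(num: int) -> bool:
    if num < 0:
        return False
    remaining, reversed_num = num, 0
    while remaining > 0:
        reversed_num = reversed_num * 10 + remaining % 10
        remaining //= 10
    return num == reversed_num

assert is_palindrome_number(121)
assert is_palindrome_number(0)
assert not is_palindrome_number(123)
assert not is_palindrome_number(-121)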
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : int ):
_A = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_A = Vector()
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = Vector([1, 2, 3, 4] )
self.assertEqual(len(_UpperCAmelCase ) , 4 )
def lowerCAmelCase_ ( self : int ):
_A = Vector([1, 2] )
_A = Vector([1, 2, 3, 4, 5] )
_A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_A = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def lowerCAmelCase_ ( self : str ):
_A = Vector([1, 2, 3] )
_A = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Vector([1, 2, 3] )
_A = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def lowerCAmelCase_ ( self : int ):
_A = Vector([1, 2, 3] )
_A = Vector([2, -1, 4] ) # for test of dot product
_A = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def lowerCAmelCase_ ( self : Dict ):
self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 )
def lowerCAmelCase_ ( self : Tuple ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = Vector([1, 2, 3] )
_A = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = Vector([1, 0, 0, 0, 0, 0] )
_A = x.copy()
self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) )
def lowerCAmelCase_ ( self : str ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def lowerCAmelCase_ ( self : Tuple ):
_A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_A = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) )
def lowerCAmelCase_ ( self : List[Any] ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def lowerCAmelCase_ ( self : Tuple ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def lowerCAmelCase_ ( self : int ):
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 315 | 1 |
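# Two of the expected values in the tests above, checked by hand:
# |(1, 2)| = sqrt(1^2 + 2^2) = sqrt(5) ≈ 2.236, and
# (2, -1, 4) · (1, -2, -1) = 2 + 2 - 4 = 0, which is why the dot-product
# test asserts the result is exactly zero.
import math

print(round(math.sqrt(1**2 + 2**2), 3))        # 2.236
print(2 * 1 + (-1) * (-2) + 4 * (-1))          # 0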
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase ( a__ ):
lowercase__ : Tuple = """new-model"""
if is_tf_available():
class lowercase ( a__ ):
lowercase__ : Optional[int] = NewModelConfig
@require_tf
class lowercase ( unittest.TestCase ):
@slow
def __snake_case( self : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "bert-base-cased"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __snake_case( self : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "bert-base-cased"
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForPreTraining.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(_UpperCamelCase , output_loading_info=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __snake_case( self : int ) -> List[str]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(_UpperCamelCase , output_loading_info=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __snake_case( self : Tuple ) -> List[str]:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase , output_loading_info=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForQuestionAnswering.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
@require_tensorflow_probability
def __snake_case( self : Optional[int] ) -> Any:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(
_UpperCamelCase , output_loading_info=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=_UpperCamelCase ) , 14_410 )
def __snake_case( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=_UpperCamelCase ) , 14_410 )
def __snake_case( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = copy.deepcopy(model.config )
SCREAMING_SNAKE_CASE = ["FunnelBaseModel"]
SCREAMING_SNAKE_CASE = TFAutoModel.from_config(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
try:
AutoConfig.register("new-model" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_UpperCamelCase ):
auto_class.register(_UpperCamelCase , _UpperCamelCase )
auto_class.register(_UpperCamelCase , _UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCamelCase ):
auto_class.register(_UpperCamelCase , _UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE = BertModelTester(self ).get_config()
SCREAMING_SNAKE_CASE = NewModelConfig(**tiny_config.to_dict() )
SCREAMING_SNAKE_CASE = auto_class.from_config(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = auto_class.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("bert-base" )
def __snake_case( self : Dict ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCamelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_UpperCamelCase , revision="aaaaaa" )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def __snake_case( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(_UpperCamelCase , "Use `from_pt=True` to load this model" ):
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 370 | from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_lowerCamelCase : str = logging.get_logger(__name__)
def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ):
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def __lowerCamelCase (UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Optional[str] , UpperCAmelCase__ : Optional[str] ):
SCREAMING_SNAKE_CASE = to_pil_image(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = pil_image.size
SCREAMING_SNAKE_CASE = pytesseract.image_to_data(UpperCAmelCase__ , lang=UpperCAmelCase__ , output_type="dict" , config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE = [idx for idx, word in enumerate(UpperCAmelCase__ ) if not word.strip()]
SCREAMING_SNAKE_CASE = [word for idx, word in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE = []
for x, y, w, h in zip(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = [x, y, x + w, y + h]
actual_boxes.append(UpperCAmelCase__ )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowercase ( a ):
lowercase__ : Optional[int] = ["""pixel_values"""]
def __init__( self : int , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : bool = True , _UpperCamelCase : float = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : Union[float, Iterable[float]] = None , _UpperCamelCase : Union[float, Iterable[float]] = None , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = "" , **_UpperCamelCase : Union[str, Any] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = resample
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_value
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
SCREAMING_SNAKE_CASE = apply_ocr
SCREAMING_SNAKE_CASE = ocr_lang
SCREAMING_SNAKE_CASE = tesseract_config
def __snake_case( self : Dict , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[Any] , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
SCREAMING_SNAKE_CASE = (size["height"], size["width"])
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : int , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, Iterable[float]] , _UpperCamelCase : Union[float, Iterable[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Tuple , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Union[float, Iterable[float]] = None , _UpperCamelCase : Union[float, Iterable[float]] = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE = size if size is not None else self.size
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(_UpperCamelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for image in images:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = apply_tesseract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
words_batch.append(_UpperCamelCase )
boxes_batch.append(_UpperCamelCase )
if do_resize:
SCREAMING_SNAKE_CASE = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = BatchFeature(data={"pixel_values": images} , tensor_type=_UpperCamelCase )
if apply_ocr:
SCREAMING_SNAKE_CASE = words_batch
SCREAMING_SNAKE_CASE = boxes_batch
return data
| 206 | 0 |
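# A worked example of the 0-1000 box normalization defined at the top of the
# file above (restated locally because the in-file helper's name is mangled):
# a word at pixels (left=50, top=50, right=250, bottom=100) in a 500x250
# image maps to thousandths of the page on each axis.
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(normalize_box([50, 50, 250, 100], width=500, height=250))  # [100, 200, 500, 400]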
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"b0": efficientnet.EfficientNetBa,
"b1": efficientnet.EfficientNetBa,
"b2": efficientnet.EfficientNetBa,
"b3": efficientnet.EfficientNetBa,
"b4": efficientnet.EfficientNetBa,
"b5": efficientnet.EfficientNetBa,
"b6": efficientnet.EfficientNetBa,
"b7": efficientnet.EfficientNetBa,
}
_UpperCAmelCase = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : Union[str, Any] = EfficientNetConfig()
A_ : Optional[int] = CONFIG_MAP[model_name]['''hidden_dim''']
A_ : Dict = CONFIG_MAP[model_name]['''width_coef''']
A_ : int = CONFIG_MAP[model_name]['''depth_coef''']
A_ : Any = CONFIG_MAP[model_name]['''image_size''']
A_ : Any = CONFIG_MAP[model_name]['''dropout_rate''']
A_ : List[Any] = CONFIG_MAP[model_name]['''dw_padding''']
A_ : str = '''huggingface/label-files'''
A_ : Any = '''imagenet-1k-id2label.json'''
A_ : Dict = 10_00
A_ : List[Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) )
A_ : Dict = {int(__lowercase ): v for k, v in idalabel.items()}
A_ : List[Any] = idalabel
A_ : str = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A_ : Union[str, Any] = Image.open(requests.get(__lowercase ,stream=__lowercase ).raw )
return im
def UpperCamelCase ( __lowercase : Any ):
'''simple docstring'''
A_ : Union[str, Any] = CONFIG_MAP[model_name]['''image_size''']
A_ : List[str] = EfficientNetImageProcessor(
size={'height': size, 'width': size} ,image_mean=[0.4_85, 0.4_56, 0.4_06] ,image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] ,do_center_crop=__lowercase ,)
return preprocessor
def UpperCamelCase ( __lowercase : Optional[Any] ):
'''simple docstring'''
A_ : str = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
A_ : Tuple = sorted(set(__lowercase ) )
A_ : Optional[Any] = len(__lowercase )
A_ : Union[str, Any] = {b: str(__lowercase ) for b, i in zip(__lowercase ,range(__lowercase ) )}
A_ : int = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
A_ : List[Any] = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # TF conv kernels are HWIO; PyTorch expects OIHW
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            # dense kernels are (in, out) in TF; PyTorch expects (out, in)
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
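
# Quick illustrative sanity check (not part of the original conversion logic)
# of the layout conversion above: a TF kernel stored as (H, W, C_in, C_out)
# must come out as a PyTorch kernel of shape (C_out, C_in, H, W).
_tf_kernel = np.zeros((3, 3, 32, 64), dtype=np.float32)        # HWIO (TF)
_pt_kernel = torch.from_numpy(_tf_kernel).permute(3, 2, 0, 1)  # OIHW (PyTorch)
assert _pt_kernel.shape == (64, 32, 3, 3)
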
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load original model
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
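
# Example invocation (illustrative; the script file name below is assumed):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model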
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
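
# Illustrative only (not part of the library): a hypothetical minimal subclass
# showing how a concrete reader plugs into the abstract interface above. A
# real reader would resolve files and honor cache_dir/streaming/num_proc.
class InMemoryListReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        # path_or_paths is repurposed here to carry raw strings for the demo
        return Dataset.from_dict({"text": list(self.path_or_paths)})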
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
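
# Illustrative check (not in the original script): accuracy over NumPy arrays
# of predicted vs. gold label ids.
assert simple_accuracy(np.array([1, 0, 2]), np.array([1, 1, 2])) == 2 / 3
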
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 85 | """simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self) -> None:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer(self , **kwargs):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=True ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=True ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )

        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )

        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = '''Encode this sequence.'''
        space_encoding = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )

        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )

        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )

        # Testing spaces after special tokens
        mask = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )

        sequence = '''Encode <mask> sequence'''
        sequence_nospace = '''Encode <mask>sequence'''

        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )

        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )

                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )

            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , add_prefix_space )

            self.assertEqual(post_processor_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''trim_offsets'''] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument
        # `add_prefix_space` and `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"""{text_of_1_token} {text_of_1_token}"""

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                text = f""" {text}"""

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
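
# Conceptual sketch (illustrative, not part of the test suite): temperature
# warping divides logits by T, so T < 1 sharpens and T > 1 flattens the
# induced softmax distribution.
if is_flax_available():
    _logits = jnp.array([1.0, 2.0, 3.0])
    _sharp = jax.nn.softmax(_logits / 0.5)   # more peaked than softmax(_logits)
    _smooth = jax.nn.softmax(_logits / 1.3)  # flatter than softmax(_logits)
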
@require_flax
class LogitsProcessorTest(unittest.TestCase):
"""simple docstring"""
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2 , length=length )

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3 )

        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )

        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )

        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids , dist , cur_len=None ) )

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(filtered_dist , EXPECTED_FILTERED_DIST , atol=1e-3 ) )

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids , ramp_logits , cur_len=None )

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20) , vocab_size=20 )
        cur_len = 5
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size , vocab_size )
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores_before_min_length ).any() )
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1) , vocab_size=20 )
        cur_len = 1
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4) , vocab_size=20 )
        cur_len = 4
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
        scores = top_k_warp(input_ids , scores , cur_len=cur_len )
        scores = top_p_warp(input_ids , scores , cur_len=cur_len )
        scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids , scores_comp , cur_len=cur_len )

        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids , scores , cur_len):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
            scores = top_k_warp(input_ids , scores , cur_len=cur_len )
            scores = top_p_warp(input_ids , scores , cur_len=cur_len )
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
            return scores

        # with processor list
        def run_processor_list(input_ids , scores , cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids , scores , cur_len=cur_len )
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )

        scores = jitted_run_no_processor_list(input_ids , scores , cur_len )
        scores_comp = jitted_run_processor_list(input_ids , scores_comp , cur_len )

        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
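
# Conceptual sketch (illustrative, not the actual implementation) of the
# `get_duration` decorator imported from `utils` above: it is assumed to time
# the wrapped call and return the elapsed seconds instead of the call's result.
# Defining it here would shadow the real import, so it is left as a comment.
#
# def get_duration(func):
#     def wrapper(*args, **kwargs):
#         start = timeit.default_timer()
#         func(*args, **kwargs)
#         return timeit.default_timer() - start
#     return wrapper
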
def benchmark_map_filter():
    # the benchmark labels below are descriptive; they mirror the timed scenarios
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES)

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
"""simple docstring"""
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Count inversions via merge sort in O(n log n); returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_arr, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversions_p + inversions_q + cross_inversions
    return sorted_arr, num_inversions
def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (p[i], q[j]) with p[i] > q[j]."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i < k <= len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
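
# Illustrative check (not part of the original module): [3, 1, 2] contains two
# out-of-order pairs, (3, 1) and (3, 2), so both implementations report 2.
assert count_inversions_bf([3, 1, 2]) == 2
assert count_inversions_recursive([3, 1, 2])[1] == 2
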
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency (gain) response of a filter via an impulse response FFT."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter via an impulse response FFT."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
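
# Illustrative only: a trivial pass-through filter satisfying the FilterType
# protocol, handy for sanity-checking the two plotting helpers above.
class Identity:
    def process(self, sample: float) -> float:
        return sample


# Example (commented out because it opens a matplotlib window):
# show_frequency_response(Identity(), 48000)
# show_phase_response(Identity(), 48000)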
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative' )
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(''',''')
    for rule_str in rule_list[:-1]:
        value_str, lr = rule_str.split(''':''')
        steps = int(value_str)
        lr = float(lr)
        rules_dict[steps] = lr
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(f'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''')

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1, ):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''')

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''')

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
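
# Hypothetical usage sketch (not part of the module; names below are
# illustrative only): a linear warmup/decay schedule attached to an optimizer.
#
# import torch
# opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
# sched = get_scheduler("linear", opt, num_warmup_steps=100, num_training_steps=1000)
# for _ in range(1000):
#     opt.step()
#     sched.step()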
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1_536,
"junnyu/roformer_chinese_base": 1_536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom Jieba pre-tokenizer cannot be pickled; swap in a plain one before serialising
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
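
# Usage sketch (added for illustration; it downloads weights from the hub, so it is
# shown as comments). The checkpoint id is one of the pretrained ids listed above.
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer("今天天气非常好。")["input_ids"]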
| 95 | """simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError('Undefined for non-integers')
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers')

    getcontext().prec = precision
    # each term of the series contributes roughly 14 correct digits
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f'''The first {n} digits of pi is: {pi(n)}''')
| 95 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 204 |
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Discount each cash flow by (1 + rate)^period and return the rounded sum."""
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
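
# Worked example (added for illustration): an outlay of 1000 today followed by two
# annual inflows of 600, discounted at 10% per year:
#   -1000 + 600 / 1.1 + 600 / 1.1**2 = 41.3223... -> rounded to 41.32
assert net_present_value(0.10, [-1000, 600, 600]) == 41.32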
if __name__ == "__main__":
import doctest
doctest.testmod()
| 204 | 1 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"""getting tweets before {oldest}""")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"""...{len(alltweets)} tweets downloaded so far""")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"""new_{screen_name}_tweets.csv""", '''w''') as f:
        writer = csv.writer(f)
        writer.writerow(['''id''', '''created_at''', '''text'''])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 15 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
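# Note (added): with this lazy structure, importing the package is cheap; the
# torch-backed classes are only materialised by `_LazyModule` on first attribute
# access, e.g. `from transformers.models.xmod import XmodModel` triggers the
# torch branch above, while importing `XmodConfig` does not.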
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method='nearest',
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
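
# Usage sketch (added for illustration): initialise and apply the resnet block on
# dummy NHWC data; the 128-dim time embedding size is an arbitrary choice here.
if __name__ == "__main__":
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    sample = jnp.zeros((1, 8, 8, 32))
    time_embedding = jnp.zeros((1, 128))
    params = block.init(jax.random.PRNGKey(0), sample, time_embedding)
    out = block.apply(params, sample, time_embedding)  # shape (1, 8, 8, 64)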
| 32 | 0 |
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ... starting from F(2)."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
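
# Example (added): F(12) = 144 is the first Fibonacci number with three digits,
# so solution(3) == 12.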
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 86 |
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Sum of an arithmetic progression: n / 2 * (2a + (n - 1) * d)."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
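
# Worked equation (added): with first_term = 1, common_diff = 1 and
# num_of_terms = 10 the formula gives 10 / 2 * (2 * 1 + 9 * 1) = 5 * 11 = 55.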
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86 | 1 |
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function: for each position, the length of the
    longest proper prefix of the string that is also a suffix ending there."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the length of the longest border found anywhere in the string."""
    return max(prefix_function(input_str))
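
# Example (added): prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4],
# so longest_prefix("aabcdaabc") == 4 (the border "aabc").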
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1_0_0_0, 1_0_0_0_0_0_0)
        if number == digits_fifth_powers_sum(number))
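
# Note (added): exactly six numbers in this range equal the sum of the fifth
# powers of their digits (4150, 4151, 54748, 92727, 93084, 194979); their sum,
# and hence solution(), is 443839.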
if __name__ == "__main__":
print(solution())
| 19 | 1 |
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = 111...1 (k ones) is
    divisible by `divisor`, or 0 when no repunit can be (divisor not coprime to 10)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 100_0000) -> int:
    """Project Euler 129: the least odd n coprime to 10 whose least divisible
    repunit is longer than `limit`; since A(n) <= n, search starts just below it."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
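
# Example (added): R(6) = 111111 = 7 * 15873 and no shorter repunit is divisible
# by 7, so least_divisible_repunit(7) == 6.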
if __name__ == "__main__":
print(f'''{solution() = }''') | 355 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10,
        cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25, autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            """past_values""": past_values,
            """static_categorical_features""": static_categorical_features,
            """past_time_features""": past_time_features,
            """past_observed_mask""": past_observed_mask,
            """future_time_features""": future_time_features,
            """future_values""": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["""missing_keys"""], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="""Model has no tokens embeddings""")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, """forward"""))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                """past_values""",
                """past_time_features""",
                """past_observed_mask""",
                """static_categorical_features""",
                """static_real_features""",
                """future_values""",
                """future_time_features""",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("""future_observed_mask""")
            expected_arg_names.extend(
                [
                    """decoder_attention_mask""",
                    """head_mask""",
                    """decoder_head_mask""",
                    """cross_attn_head_mask""",
                    """encoder_outputs""",
                    """past_key_values""",
                    """output_hidden_states""",
                    """output_attentions""",
                    """use_cache""",
                    """return_dict""",
                ])
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , """seq_length""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """decoder_seq_length""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """encoder_seq_length""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """d_model""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """num_attention_heads""" , UpperCAmelCase )
_snake_case = d_model // num_attention_heads
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_snake_case = outputs.encoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_snake_case = len(UpperCAmelCase )
_snake_case = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# decoder attentions
_snake_case = outputs.decoder_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple) )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_snake_case = outputs.cross_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple) )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + 2 , len(UpperCAmelCase ) )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""", filename=filename, repo_type="""dataset""")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""], past_time_features=batch["""past_time_features"""], past_observed_mask=batch["""past_observed_mask"""], static_categorical_features=batch["""static_categorical_features"""], future_values=batch["""future_values"""], future_time_features=batch["""future_time_features"""], )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""").to(torch_device)
        batch = prepare_batch("""val-batch.pt""")
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""], past_time_features=batch["""past_time_features"""], past_observed_mask=batch["""past_observed_mask"""], static_categorical_features=batch["""static_categorical_features"""], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""").to(torch_device)
        batch = prepare_batch("""val-batch.pt""")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["""static_categorical_features"""], past_time_features=batch["""past_time_features"""], past_values=batch["""past_values"""], future_time_features=batch["""future_time_features"""], past_observed_mask=batch["""past_observed_mask"""], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1)) | 270 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''', variant='''fp16''', torch_dtype=torch.float16)

        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            '''DeepFloyd/IF-II-L-v1.0''', variant='''fp16''', torch_dtype=torch.float16, text_encoder=None, tokenizer=None)

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('''cuda''')

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt('''anime turtle''', device='''cuda''')

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
# pipeline 1
_start_torch_memory_measurement()
lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
lowerCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
# pipeline 1
_start_torch_memory_measurement()
lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
lowerCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , original_image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
# pipeline 1
_start_torch_memory_measurement()
lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
lowerCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE , negative_prompt_embeds=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , original_image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 338 | def selection_sort(collection: list) -> list:
    """Sort `collection` in place by repeatedly selecting the minimum, and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
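
# Example (added): selection_sort([5, 2, 4, 1]) returns [1, 2, 4, 5]; the sort is
# performed in place and the same list object is returned.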
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 338 | 1 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset with the `datasets` package and save each
    split as line-aligned `.source`/`.target` files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = F"""{src_lang}-{tgt_lang}"""
    print(F"""Converting {dataset}-{pair}""")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = F"""{dataset}-{pair}"""
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(F"""Splitting {split} with {ds[split].num_rows} records""")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(F"""{fn}.source""")
        tgt_path = save_dir.joinpath(F"""{fn}.target""")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(F"""Saved {dataset} dataset to {save_dir}""")
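
# Usage sketch (added; assumes this script is saved as download_wmt_dataset.py):
#   python download_wmt_dataset.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# fire maps the CLI flags onto the function arguments above.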
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 351 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES)})
    data_dir: str = field(
        default=None, metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""})
    max_seq_length: int = field(
        default=1_2_8, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    doc_stride: int = field(
        default=1_2_8, metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""}, )
    max_query_length: int = field(
        default=6_4, metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        }, )
    max_answer_length: int = field(
        default=3_0, metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
    version_2_with_negative: bool = field(
        default=False, metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""})
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""})
    n_best_size: int = field(
        default=2_0, metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""})
    lang_id: int = field(
        default=0, metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        }, )
    threads: int = field(default=1, metadata={"""help""": """multiple threads for converting example to features"""})
class Split(Enum):
    train = """train"""
    dev = """dev"""


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""", )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""", time.time() - start)

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert the i-th cached feature to tensors
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
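
# A minimal usage sketch (illustrative; the checkpoint name and data path below are
# placeholders, not part of this file):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)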
| 102 | 0 |
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
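
# Round-trip example (illustrative):
#   encrypt("Sos!")               -> "... --- ... -.-.--"
#   decrypt("... --- ... -.-.--") -> "SOS!"  (case is not preserved)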
if __name__ == "__main__":
main()
| 94 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
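
# The original Swin checkpoints unfold each 2x2 group of patches during patch merging in
# a channel order that differs from the HF implementation. The helpers below permute the
# downsample reduction/norm weights with a [0, 2, 1, 3] reordering to convert between the
# two layouts; the "reverse_" variants are the inverses of the "correct_" variants.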
def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
# rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
# verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"upernet-swin-{size}" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 249 | 0 |
'''simple docstring'''
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 = prime candidate, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
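
# Sanity check on a small input: solution(10) == 17, since 2 + 3 + 5 + 7 = 17.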
if __name__ == "__main__":
print(F'''{solution() = }''') | 187 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
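
# Note: XOR is its own inverse, so applying the same key twice returns the original
# text; encrypt/decrypt (and the *_string variants) are symmetric operations.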
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful") | 187 | 1 |
def _UpperCamelCase ( snake_case__ = "The quick brown fox jumps over the lazy dog", ) -> bool:
__UpperCAmelCase : str = set()
# Replace all the whitespace in our sentence
__UpperCAmelCase : Optional[Any] = input_str.replace(" ", "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(snake_case__ ) == 26
def _UpperCamelCase ( snake_case__ = "The quick brown fox jumps over the lazy dog", ) -> bool:
__UpperCAmelCase : Union[str, Any] = [False] * 26
for char in input_str:
if char.islower():
__UpperCAmelCase : Any = True
elif char.isupper():
__UpperCAmelCase : List[str] = True
return all(snake_case__ )
def _UpperCamelCase ( snake_case__ = "The quick brown fox jumps over the lazy dog", ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
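
# All three implementations agree, e.g.:
#   is_pangram("The quick brown fox jumps over the lazy dog")  -> True
#   is_pangram("My name is Unknown")                           -> False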
def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 157 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with a given execution provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token=None, revision=None,
                         force_download: bool = False, cache_dir: Optional[str] = None,
                         file_name: Optional[str] = None, provider: Optional[str] = None,
                         sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token,
                revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True,
                        use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir,
            force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
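
# Hypothetical usage sketch (the repo id and file name are placeholders, not from this file):
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-repo", file_name="model.onnx")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))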
| 157 | 1 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 261 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 62 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
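
# Note: _LazyModule defers the heavy imports declared in _import_structure (torch, the
# modeling files) until an attribute is first accessed, keeping `import transformers` fast.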
| 62 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 60 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
def __init__( self , _lowercase , _lowercase=2 , _lowercase=32 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=True , _lowercase=32 , _lowercase=4 , _lowercase=[0, 1, 2, 3] , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=3 , _lowercase=[1, 384, 24, 24] , _lowercase=True , _lowercase=None , )-> List[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = backbone_out_indices
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = backbone_featmap_shape
UpperCamelCase_ = scope
UpperCamelCase_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_lowercase , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Any:
UpperCamelCase_ = DPTModel(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Union[str, Any]:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForDepthEstimation(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> str:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = DPTForSemanticSegmentation(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase_ ( self )-> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def UpperCAmelCase_ ( self )-> Any:
pass
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_lowercase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowercase )
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_lowercase )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
            if model_class in get_values(MODEL_MAPPING):
continue
UpperCamelCase_ = model_class(_lowercase )
model.to(_lowercase )
model.train()
UpperCamelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
UpperCamelCase_ = model(**_lowercase ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Optional[int]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = False
UpperCamelCase_ = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase_ = model_class(_lowercase )
model.to(_lowercase )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
UpperCamelCase_ = model(**_lowercase ).loss
loss.backward()
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(config=_lowercase )
# Skip the check for the backbone
UpperCamelCase_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase_ = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase_ ( self )-> int:
pass
@slow
def UpperCAmelCase_ ( self )-> List[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase_ = DPTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def UpperCAmelCase_ ( self )-> List[str]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = "add"
with self.assertRaises(_lowercase ):
UpperCamelCase_ = DPTForDepthEstimation(_lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
UpperCamelCase_ = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(_lowercase )
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_lowercase , return_tensors="pt" ).to(_lowercase )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_lowercase )
UpperCamelCase_ = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase_ = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _lowercase )
UpperCamelCase_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _lowercase , atol=1e-4 ) )
| 60 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # extra 5-way head for the answer-category classification
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
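
# The model is trained on three targets at once (answer start, answer end, and the
# pooled answer-category logits); the scalar loss is the plain average of the three
# cross-entropy terms.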
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
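
# Note: iterating len(dataset) // batch_size batches silently drops any trailing partial
# batch, which keeps every batch the same size (as required for pmap-sharded inputs).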
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
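
# The joined schedule ramps linearly from init_lr to lr over `warmup_steps`, then decays
# linearly from lr toward ~0 (1e-7) over the remaining training steps.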
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 333 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self ):
        tokenizer = self.get_tokenizer()
__UpperCAmelCase = '''Encode this sequence.'''
__UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase__ , lowercase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
# Testing spaces after special tokens
__UpperCAmelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
__UpperCAmelCase = '''Encode <mask> sequence'''
__UpperCAmelCase = '''Encode <mask>sequence'''
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
    def test_pretokenized_inputs(self ):
pass
    def test_embeded_special_tokens(self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
    def test_change_add_prefix_space_and_trim_offsets_args(self ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''trim_offsets'''] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
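# -- Hedged usage sketch: how `add_prefix_space` and `trim_offsets` surface in
# offset mappings with the fast tokenizer tested above. Assumes network access
# to the Hugging Face Hub; the printed offsets are indicative only.
if __name__ == "__main__":
    from transformers import LongformerTokenizerFast
    tok = LongformerTokenizerFast.from_pretrained(
        "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
    )
    enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    print(enc.offset_mapping)  # leading spaces are trimmed, e.g. [(0, 5), (6, 11)]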
| 333 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig( PretrainedConfig ):
    model_type = 'instructblip_vision_model'
    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1e-6 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig( PretrainedConfig ):
    model_type = 'instructblip_qformer'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig( PretrainedConfig ):
    model_type = 'instructblip'
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs( cls , vision_config: InstructBlipVisionConfig , qformer_config: InstructBlipQFormerConfig , text_config: PretrainedConfig , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
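# -- Hedged usage sketch for the configs above, mirroring
# `from_vision_qformer_text_configs`; the OPT text config is an assumption
# (it is also the documented default fallback).
if __name__ == "__main__":
    from transformers import OPTConfig
    config = InstructBlipConfig.from_vision_qformer_text_configs(
        InstructBlipVisionConfig(), InstructBlipQFormerConfig(), OPTConfig()
    )
    print(config.num_query_tokens)  # 32 by default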
| 202 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences( sequence: list[Any] ) -> None:
    '''Print every subsequence of the given sequence.'''
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree( sequence: list[Any] , current_subsequence: list[Any] , index: int ) -> None:
    '''Recursively explore the state-space tree of include/exclude choices.'''
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
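    # -- Hedged cross-check (illustrative): for distinct elements the printed
    # subsequences are exactly the order-preserving combinations, 2**n in total.
    from itertools import combinations
    n = len(seq)
    assert sum(1 for r in range(n + 1) for _ in combinations(seq, r)) == 2 ** n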
| 202 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=DummyObject):
    _backends = ['''flax''', '''transformers''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __A (cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __A (cls , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax''', '''transformers'''] )
class lowerCamelCase__ ( metaclass=DummyObject):
    _backends = ['''flax''', '''transformers''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __A (cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __A (cls , *UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax''', '''transformers'''] )
class lowerCamelCase__ ( metaclass=DummyObject):
    _backends = ['''flax''', '''transformers''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase ) -> str:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __A (cls , *UpperCAmelCase , **UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __A (cls , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax''', '''transformers'''] )
class lowerCamelCase__ ( metaclass=DummyObject):
    _backends = ['''flax''', '''transformers''']
def __init__(self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __A (cls , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __A (cls , *UpperCAmelCase , **UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax''', '''transformers'''] )
| 5 |
'''simple docstring'''
def get_data( source_data: list[list[float]] ) -> list[list[float]]:
    '''Convert rows of raw data into per-column lists of floats.'''
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score( data_lists: list[list[float]] , weights: list[int] ) -> list[list[float]]:
    '''Normalize every column to [0, 1]; weight 0 inverts the score.'''
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f'Invalid weight of {weight:f} provided'
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores( score_lists: list[list[float]] ) -> list[float]:
    '''Sum the per-column scores for every row.'''
    final_scores: list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity( source_data: list[list[float]] , weights: list[int] ) -> list[list[float]]:
    '''Append a combined weighted score to every row of source_data.'''
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
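# -- Hedged usage example (made-up data): weight 0 rewards smaller values,
# weight 1 rewards larger values; each row gains a combined score at the end.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles , [0, 0, 1] ))
    # e.g. the first row becomes [20, 60, 2012, 2.0]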
| 120 | 0 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCAmelCase = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
UpperCAmelCase = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds , labels) -> float:
    return float((preds == labels).mean())
def acc_and_f1(preds , labels) -> dict:
    acc = simple_accuracy(preds , labels)
    f1 = float(f1_score(y_true=labels , y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs , in_sentvecs) -> float:
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0)
    sim = cdist(en_sentvecs , in_sentvecs , '''cosine''')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions , references )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions , references )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(snake_case__ , snake_case__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 172 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path , targets) -> Union[str, Any]:
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []
    def parse_line(fp):
        for line in fp:
            if isinstance(line , bytes):
                line = line.decode('''UTF-8''')
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' '''):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = '''\n'''.join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(F""": {x}: """ in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path , filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""")
    return selected_warnings
def extract_warnings(artifact_dir , targets) -> Any:
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p) for p in os.listdir(artifact_dir) if (p.endswith('''.zip''') or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(''',''')
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
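    # -- Hedged illustration of the matching rule in `parse_line` above: a
    # buffered warning is kept only if it contains ": <Target>: " for one of
    # the requested targets. The sample line below is made up.
    example_warning = "src/foo.py:12: DeprecationWarning: bar is deprecated"
    assert any(F""": {x}: """ in example_warning for x in ["DeprecationWarning"])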
| 172 | 1 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowercase = logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , )-> Optional[int]:
'''simple docstring'''
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed." )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed." )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
# compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        torch_dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    module_name = name.replace(".weight" , "" ).replace(".bias" , "" )
                    param = getattr(model , module_name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(torch_dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None )-> Any:
'''simple docstring'''
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
    if isinstance(device_map , str ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
        special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
} )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == "balanced_low_0") , max_memory=max_memory , **kwargs , )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None )-> Any:
    '''Replace all `torch.nn.Linear` modules with bnb quantized layers.'''
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug." )
    return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , )-> List[Any]:
    '''Private recursive helper for `replace_with_bnb_layers`.'''
    has_been_replaced = False
for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert( model )-> Union[str, Any]:
    '''Find modules (e.g. the lm_head and tied weights) to keep in full precision.'''
    with init_empty_weights():
        tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , "base_model_prefix" ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , "" )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model )-> List[Any]:
    '''Check whether the model contains `bnb.nn.Linear4bit` layers.'''
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter )-> Any:
'''simple docstring'''
return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics )-> str:
    '''Offload an 8-bit quantized parameter (and its SCB statistics) to disk.'''
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split("." )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(F'''{module} has no attribute {split}.''' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , "SCB" ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace("weight" , "SCB" ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , "meta" , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 40 |
"""simple docstring"""
def is_isogram( string: str )-> bool:
    '''Return True if no letter occurs more than once in the given string.'''
    if not all(x.isalpha() for x in string ):
        raise ValueError("String must only contain alphabetic characters." )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
    input_str = input("""Enter a string """).strip()
    isogram = is_isogram(input_str)
    print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
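    # -- Hedged quick checks (illustrative inputs only):
    assert is_isogram("Uncopyrightable" )
    assert not is_isogram("allowance" )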
| 40 | 1 |
from collections.abc import Iterable
from typing import Any
class Node :
    '''A node of a binary search tree.'''
    def __init__( self , value = None ):
        self.value = value
        self.parent: Node | None = None # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self ):
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class BinarySearchTree :
    '''A binary search tree built from Node objects.'''
    def __init__( self , root = None ):
        self.root = root
    def __str__( self ):
        return str(self.root )
    def __reassign_nodes( self , node , new_children ):
        if new_children is not None: # reset its kids
            new_children.parent = node.parent
        if node.parent is not None: # reset its parent
            if self.is_right(node ): # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node ):
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ):
        return self.root is None
    def __insert( self , value ):
        new_node = Node(value ) # create a new Node
        if self.empty(): # if Tree is empty
            self.root = new_node # set its root
        else: # Tree is not empty
            parent_node = self.root # from root
            if parent_node is None:
                return
            while True: # While we don't get to a leaf
                if value < parent_node.value: # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values ):
        for value in values:
            self.__insert(value )
    def search( self , value ):
        if self.empty():
            raise IndexError("""Warning: Tree is empty! please use another.""" )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node = None ):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node = None ):
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value ):
        node = self.search(value ) # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None: # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None: # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None: # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left ) # Gets the max value of the left branch
                self.remove(tmp_node.value ) # type: ignore
                node.value = (
                    tmp_node.value # type: ignore
                ) # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self , node ):
        if node is not None:
            yield node # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function=None ):
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr , node ):
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self , k , node ):
        arr: list[int] = []
        self.inorder(arr , node ) # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None ) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print("""The value 6 exists""" )
    else:
        print("""The value 6 doesn't exist""" )
    if t.search(-1 ) is not None:
        print("""The value -1 exists""" )
    else:
        print("""The value -1 doesn't exist""" )
    if not t.empty():
        print("""Max Value: """ , t.get_max().value ) # type: ignore
        print("""Min Value: """ , t.get_min().value ) # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
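    # -- Hedged extra check: `find_kth_smallest` returns the k-th element of
    # the inorder (sorted) traversal. Values below are illustrative.
    demo_tree = BinarySearchTree()
    demo_tree.insert(8, 3, 6, 1, 10)
    assert demo_tree.find_kth_smallest(2, demo_tree.root) == 3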
| 141 |
import argparse
from collections import defaultdict
def overwrite_file(file , class_name , test_name , correct_line , done_test) -> int:
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , """r""" ) as f:
        lines = f.readlines()
    class_regex = f"""class {class_name}("""
    test_regex = f"""{4 * ' '}def {test_name}("""
    line_begin_regex = f"""{8 * ' '}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * ' '}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * ' '}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , """w""" ) as f:
        for line in new_lines:
            f.write(line )
def main(correct_filename , fail=None ) -> Any:
    if fail is not None:
        with open(fail , """r""" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename , """r""" ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file , class_name , test_name , correct_line = line.split(""";""" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
A_ : Optional[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
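    # -- Hedged illustration of the record format consumed by `main` above;
    # the example line is made up.
    example = "tests/test_modeling_x.py;XModelTest;test_inference;expected = [1.0]"
    print(example.split(""";""" ))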
| 141 | 1 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class BertGenerationTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 1002 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_full_tokenizer( self ):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer( self ):
        return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = """Hello World!"""
        original_tokenizer_encodings = [1_8536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = """ """.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="""pt""" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration( self ):
# fmt: off
lowerCAmelCase_ :Tuple = {"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 84 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
_lowercase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , additional_special_tokens=None , **kwargs , ):
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
    # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
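        # Illustrative sanity check (assumes the standard mbart-large sentencepiece
        # model, per the alignment table above): the first "real" piece "," has
        # spm id 3, so its fairseq id is 3 + self.fairseq_offset == 4.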
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Retrieve a mask marking the special tokens added by build_inputs_with_special_tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
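    # Worked example (ids are schematic, not real vocab entries): with
    # suffix_tokens == [eos_token_id, cur_lang_code], a single sequence
    # [x1, x2] becomes [x1, x2, </s>, lang_code] -- unlike BERT-style models,
    # MBart appends the language code after the text instead of prefixing it.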
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """mBART does not make use of token type ids, therefore a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
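# Minimal usage sketch (illustrative only; checkpoint name taken from the vocab map above):
#   tokenizer = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # batch["input_ids"] ends with [..., eos_token_id, lang_code_to_id["en_XX"]]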
| 93 | 0 |
"""simple docstring"""
def lowercase ( a__ : int ) -> int:
_UpperCamelCase = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def lowercase ( a__ : int = 100 ) -> int:
_UpperCamelCase = 1
_UpperCamelCase = 2
for i in range(2 , max_n + 1 ):
_UpperCamelCase = pre_numerator
_UpperCamelCase = 2 * i // 3 if i % 3 == 0 else 1
_UpperCamelCase = cur_numerator
_UpperCamelCase = e_cont * pre_numerator + temp
return sum_digits(a__ )
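# Worked example: the convergents of e start 2, 3, 8/3, 11/4, 19/7, 87/32, ...
# The 10th convergent is 1457/536, so solution(10) == 1 + 4 + 5 + 7 == 17,
# the value quoted in Project Euler problem 65.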
if __name__ == "__main__":
print(F'''{solution() = }''')
| 54 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 54 | 1 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = jax_version = jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 95 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
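# The hypothesis being fitted is linear: h(x) = p0 + p1*x1 + p2*x2 + p3*x3,
# with parameter_vector == [p0, p1, p2, p3]. Worked example on the first
# training pair ((5, 2, 3), 15) with the initial parameters [2, 4, 1, 5]:
#   h = 2 + 4*5 + 1*2 + 5*3 = 39, so the initial error is 39 - 15 = 24.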
def _error(example_no, data_set="train"):
    """Difference between predicted and actual output for a given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis on a single input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output value for an example from the train or test set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Predicted output value for an example from the train or test set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the error terms over `end` examples, weighted by feature `index` (or unweighted for index == -1)."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 95 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a_ ( lowerCamelCase , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def A__ ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_SCREAMING_SNAKE_CASE , use_timestep_embedding=_SCREAMING_SNAKE_CASE , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
UpperCamelCase = IPNDMScheduler()
UpperCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> List[str]:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = DanceDiffusionPipeline(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
UpperCamelCase = pipe(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A__ ( self ) -> int:
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def A__ ( self ) -> Tuple:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def A__ ( self ) -> str:
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def A__ ( self ) -> str:
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def A__ ( self ) -> int:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def A__ ( self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = torch_device
UpperCamelCase = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
UpperCamelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(generator=_SCREAMING_SNAKE_CASE , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = torch_device
UpperCamelCase = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
UpperCamelCase = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(generator=_SCREAMING_SNAKE_CASE , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 367 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Any:
"""simple docstring"""
UpperCamelCase = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_pad
def A__ ( self ) -> str:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
"""simple docstring"""
if not batched:
UpperCamelCase = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
UpperCamelCase ,UpperCamelCase = image.size
else:
UpperCamelCase ,UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase = int(self.size["""shortest_edge"""] * h / w )
UpperCamelCase = self.size["""shortest_edge"""]
elif w > h:
UpperCamelCase = self.size["""shortest_edge"""]
UpperCamelCase = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCamelCase = self.size["""shortest_edge"""]
UpperCamelCase = self.size["""shortest_edge"""]
else:
UpperCamelCase = []
for image in image_inputs:
UpperCamelCase ,UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
UpperCamelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a_ ( lowerCamelCase , unittest.TestCase ):
lowercase = ConditionalDetrImageProcessor if is_vision_available() else None
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = ConditionalDetrImageProcessingTester(self )
@property
def A__ ( self ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size""" ) )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
UpperCamelCase ,UpperCamelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"""image_id""": 39769, """annotations""": target}
# encode them
UpperCamelCase = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
UpperCamelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _SCREAMING_SNAKE_CASE ) )
@slow
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
UpperCamelCase = json.loads(f.read() )
UpperCamelCase = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
UpperCamelCase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
UpperCamelCase = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
UpperCamelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _SCREAMING_SNAKE_CASE ) )
# verify masks
UpperCamelCase = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _SCREAMING_SNAKE_CASE ) )
| 183 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]:
__lowerCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = kwargs.pop('''feature_extractor''' )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
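# Minimal usage sketch (checkpoint name is an assumption for illustration):
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")
#   # -> encoding with input_ids, attention_mask and pixel_values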
| 270 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
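# Worked example: 49/98 is digit cancelling because striking the shared 9
# leaves 4/8, and 4/8 == 49/98 == 0.5, so is_digit_cancelling(49, 98) is True.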
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
| 270 | 1 |
"""
Total of the name scores in a sorted name list (Project Euler problem 22).
"""
import os


def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
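# Worked example from the problem statement: COLIN has alphabetical value
# 3 + 15 + 12 + 9 + 14 = 53 and is the 938th name once sorted, so it
# contributes 938 * 53 = 49714 to the total.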
if __name__ == "__main__":
print(solution())
| 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = ShapEPipeline
A__ = ['''prompt''']
A__ = ['''prompt''']
A__ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def A_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return 32
@property
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
return 8
@property
def A_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Dict = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Optional[Any] = PriorTransformer(**__a )
return model
@property
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Optional[int] = ShapERenderer(**__a )
return model
def A_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
__snake_case : Tuple = self.dummy_prior
__snake_case : Union[str, Any] = self.dummy_text_encoder
__snake_case : List[str] = self.dummy_tokenizer
__snake_case : Optional[Any] = self.dummy_renderer
__snake_case : List[Any] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , )
__snake_case : int = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def A_ ( self : Union[str, Any] , __a : Dict , __a : int=0 ) -> Optional[Any]:
'''simple docstring'''
if str(__a ).startswith('mps' ):
__snake_case : List[str] = torch.manual_seed(__a )
else:
__snake_case : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
__snake_case : Optional[int] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def A_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = 'cpu'
__snake_case : Dict = self.get_dummy_components()
__snake_case : int = self.pipeline_class(**__a )
__snake_case : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) )
__snake_case : Dict = output.images[0]
__snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : str = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A_ ( self : int ) -> Tuple:
'''simple docstring'''
__snake_case : int = torch_device == 'cpu'
__snake_case : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__a , relax_max_difference=__a , )
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
__snake_case : str = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**__a )
__snake_case : Dict = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : int = 1
__snake_case : Tuple = 2
__snake_case : Tuple = self.get_dummy_inputs(__a )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : str = pipe(**__a , num_images_per_prompt=__a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def A_ ( self : str ) -> Dict:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Union[str, Any] = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : Any = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[int] = torch.Generator(device=__a ).manual_seed(0 )
__snake_case : Union[str, Any] = pipe(
'a shark' , generator=__a , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__a , __a )
| 0 | 1 |
def sum_of_digits(n: int) -> int:
    """Find the sum of digits of a number, iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of digits of a number via its string form."""
    return sum(int(c) for c in str(abs(n)))
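# Quick check shared by all three variants: 262144 -> 2 + 6 + 2 + 1 + 4 + 4 == 19,
# and the sign is ignored, so sum_of_digits(-262144) == 19 as well.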
def benchmark() -> None:
    """Benchmark the three implementations with ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 154 |
from __future__ import annotations
import math
def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    """Return the optimal value for the player to move at this node."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
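# Worked example for the tree built in main(): leaves
# [90, 23, 6, 33, 21, 65, 123, 34423] -> the maximizers at depth 2 give
# [90, 33, 65, 34423], the minimizers at depth 1 give [33, 65], and the
# maximizing root picks 65, which is the printed optimal value.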
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 154 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase ( A_ ):
def __init__(self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = dataset
snake_case : Optional[int] = process
snake_case : List[str] = params
def __len__(self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__(self : Optional[int] , snake_case__ : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = self.dataset[i]
snake_case : List[str] = self.process(snake_case__ , **self.params )
return processed
class UpperCAmelCase ( A_ ):
def __init__(self : str , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[Any]=None ) -> int:
'''simple docstring'''
snake_case : Tuple = loader
snake_case : Union[str, Any] = infer
snake_case : Union[str, Any] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
snake_case : Tuple = None
snake_case : Optional[Any] = loader_batch_size
# Internal bookkeeping
snake_case : List[str] = None
snake_case : Any = None
def __len__(self : int ) -> Optional[Any]:
'''simple docstring'''
return len(self.loader )
def __iter__(self : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = iter(self.loader )
return self
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Tuple:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
snake_case : Union[str, Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
snake_case : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(snake_case__ , snake_case__ ):
# Convert ModelOutput to tuple first
snake_case : Optional[Any] = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
snake_case : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
snake_case : List[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case__ , snake_case__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
snake_case : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
snake_case : Any = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
snake_case : Optional[Any] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
snake_case : List[str] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
snake_case : int = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
snake_case : Optional[Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
snake_case : str = self._loader_batch_data.__class__(snake_case__ )
self._loader_batch_index += 1
return result
def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[Any]:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
snake_case : Union[str, Any] = next(self.iterator )
snake_case : Tuple = self.infer(snake_case__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(snake_case__ , torch.Tensor ):
snake_case : Optional[Any] = processed
else:
snake_case : Union[str, Any] = list(processed.keys() )[0]
snake_case : Dict = processed[key]
if isinstance(snake_case__ , snake_case__ ):
snake_case : Dict = len(snake_case__ )
else:
snake_case : Optional[int] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
snake_case : List[str] = observed_batch_size
# Setting internal index to unwrap the batch
snake_case : Tuple = processed
snake_case : Union[str, Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class UpperCAmelCase ( A_ ):
def __init__(self : Tuple , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Any=None ) -> str:
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ , snake_case__ )
def __iter__(self : str ) -> List[str]:
'''simple docstring'''
snake_case : str = iter(self.loader )
snake_case : Any = None
return self
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.subiterator is None:
snake_case : Any = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
snake_case : List[str] = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
snake_case : str = self.infer(next(self.iterator ) , **self.params )
snake_case : List[str] = next(self.subiterator )
return processed
class UpperCAmelCase ( A_ ):
def __iter__(self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = iter(self.loader )
return self
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case : Optional[Any] = False
snake_case : Tuple = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
snake_case : str = self.loader_batch_item()
snake_case : Tuple = item.pop("is_last" )
accumulator.append(snake_case__ )
if is_last:
return accumulator
while not is_last:
snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(snake_case__ , torch.Tensor ):
snake_case : Dict = processed
else:
snake_case : Tuple = list(processed.keys() )[0]
snake_case : List[Any] = processed[key]
if isinstance(snake_case__ , snake_case__ ):
snake_case : int = len(snake_case__ )
else:
snake_case : Union[str, Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
snake_case : Any = observed_batch_size
snake_case : List[Any] = processed
snake_case : Dict = 0
while self._loader_batch_index < self.loader_batch_size:
snake_case : Union[str, Any] = self.loader_batch_item()
snake_case : Dict = item.pop("is_last" )
accumulator.append(snake_case__ )
if is_last:
return accumulator
else:
snake_case : List[str] = processed
snake_case : Union[str, Any] = item.pop("is_last" )
accumulator.append(snake_case__ )
return accumulator
class UpperCAmelCase ( A_ ):
def __init__(self : int , snake_case__ : Dataset , snake_case__ : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = dataset
snake_case : Optional[Any] = key
def __len__(self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__(self : List[str] , snake_case__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return self.dataset[i][self.key]
class UpperCAmelCase ( A_ ):
def __init__(self : Union[str, Any] , snake_case__ : Dataset , snake_case__ : str , snake_case__ : str ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[int] = dataset
snake_case : Tuple = keya
snake_case : Union[str, Any] = keya
def __len__(self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__(self : Union[str, Any] , snake_case__ : Optional[int] ) -> str:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 10 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs are passed through to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
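# Example invocation via fire (file names are placeholders):
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json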
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 10 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=250_880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizer", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 142 |
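A standalone sketch of the past-key-value shape bookkeeping done in generate_dummy_inputs above; all sizes here are made-up examples, not model defaults:

import torch

batch, n_head, hidden_size, seqlen = 2, 8, 64, 5
head_dim = hidden_size // n_head   # 8
past_length = seqlen + 2           # the dummy inputs use seqlen + 2, as above

# keys are (batch * heads, head_dim, past_len); values swap the last two dims
past_key_shape = (batch * n_head, head_dim, past_length)
past_value_shape = (batch * n_head, past_length, head_dim)
past = [(torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(3)]
print(past[0][0].shape, past[0][1].shape)  # torch.Size([16, 8, 7]) torch.Size([16, 7, 8])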
def binary_or(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 142 | 1 |
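Quick sanity checks for the bitwise-OR helper above (values verified by hand):

assert binary_or(25, 32) == "0b111001"  # 011001 | 100000
assert binary_or(37, 50) == "0b110111"  # 100101 | 110010
assert binary_or(0, 0) == "0b0"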
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        """simple docstring"""
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        """simple docstring"""
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        """simple docstring"""
        pass
| 133 |
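A hedged usage sketch of the document-question-answering pipeline exercised in the tests above, using the same public checkpoint; the image path is a placeholder and pytesseract plus the vision extras must be installed:

from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
outputs = dqa(image="invoice.png", question="What is the invoice number?", top_k=2)
# -> [{"score": ..., "answer": ..., "start": ..., "end": ...}, ...]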
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "tapas"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 133 | 1 |
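A short hedged sketch of instantiating the config above, assuming the packaged transformers TapasConfig that this block mirrors; the overrides are illustrative:

from transformers import TapasConfig

config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
print(config.model_type)              # tapas
print(config.num_aggregation_labels)  # 4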
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 175 |
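A worked example of the recursive sort above; it sorts the list in place:

data = [5, 3, 1, 4]
rec_insertion_sort(data, len(data))
print(data)  # [1, 3, 4, 5]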
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 20 | 0 |
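A hedged usage sketch of the helper above, assuming the packaged version (`from diffusers.utils import deprecate`); the kwarg name and the far-future version string are illustrative only:

from diffusers.utils import deprecate

def resize(image, **kwargs):
    # pops the deprecated kwarg, warns once, and returns its value
    factor = deprecate("scale", "99.0.0", "Pass `factor=` instead.", take_from=kwargs)
    return factor

print(resize(None, scale=2))  # emits a FutureWarning and prints 2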
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_1_2,
"facebook/dpr-ctx_encoder-multiset-base": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 5_1_2,
"facebook/dpr-question_encoder-multiset-base": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 5_1_2,
"facebook/dpr-reader-multiset-base": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None, return_tensors=None, return_attention_mask=None, **kwargs):
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts), f'There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.'
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input, reader_output, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4):
        '''simple docstring'''
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1])))
                if len(nbest_spans_predictions) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length: int, top_spans: int):
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
            assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 263 |
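A standalone sketch of the span-scoring rule used by _get_best_spans above: every span [i, j] with length below max_answer_length is scored as start_logits[i] + end_logits[j], then spans are taken greedily by score, skipping overlaps. The logits below are made-up numbers:

start_logits = [0.1, 2.0, 0.3]
end_logits = [0.2, 1.5, 2.5]
max_answer_length = 2

scores = []
for i, s in enumerate(start_logits):
    for length, e in enumerate(end_logits[i : i + max_answer_length]):
        scores.append(((i, i + length), s + e))
scores.sort(key=lambda x: x[1], reverse=True)
print(scores[0])  # ((1, 2), 4.5): best span starts at token 1 and ends at token 2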
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
UpperCamelCase : List[Any] = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs
    def test_inference(self):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        '''simple docstring'''
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        '''simple docstring'''
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        '''simple docstring'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        '''simple docstring'''
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        '''simple docstring'''
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        '''simple docstring'''
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        '''simple docstring'''
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        '''simple docstring'''
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16)
        pipe.to('cuda')
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type='numpy').images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
        assert np.abs((expected_image - image).max()) < 5e-1
| 263 | 1 |
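A hedged end-to-end sketch mirroring the slow test above (same public checkpoint, prompt, and token indices); it needs a CUDA GPU:

import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
).to("cuda")
image = pipe(
    prompt="a painting of an elephant with glasses",
    token_indices=[5, 7],   # positions of "elephant" and "glasses" in the tokenized prompt
    guidance_scale=7.5,
    num_inference_steps=5,
    max_iter_to_alter=5,
).images[0]
image.save("elephant_glasses.png")  # PIL image by default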
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 50 |
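A hedged usage sketch of the processor above with a public checkpoint; the waveform is random stand-in audio, not real speech:

import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
audio = np.random.randn(16_000).astype(np.float32)  # ~1 second at 16 kHz
# passing both audio and text sets "labels" from the tokenized text, as above
batch = processor(audio=audio, sampling_rate=16_000, text="hello world", return_tensors="pt")
print(batch.keys())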
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"""Checkpoint: {ckpt_name}""")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 237 | 0 |
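A standalone sketch of the qkv-splitting step in convert_attention above: a fused qkv projection of shape (3 * C, C, 1, 1) is chunked into three (C, C) matrices, squeezing away the 1x1 conv dims. The sizes here are made up:

import torch

channels = 4
qkv_weight = torch.randn(3 * channels, channels, 1, 1)
w_q, w_k, w_v = qkv_weight.chunk(3, dim=0)   # three (4, 4, 1, 1) tensors
w_q = w_q.squeeze(-1).squeeze(-1)
print(w_q.shape)  # torch.Size([4, 4])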
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, """rb""") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("""version"""):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""")
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """)
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    return pt_model
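
# Minimal usage sketch (the model class and file name below are illustrative
# assumptions, not part of this module):
#   pt_model = MyPyTorchModel(config)
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")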
| 141 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
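
# Usage sketch (the checkpoint id is illustrative):
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")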
| 141 | 1 |
import os


def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
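
# Worked example from the problem statement: COLIN has letter value
# 3 + 15 + 12 + 9 + 14 = 53 and, as the 938th name in sorted order,
# scores 938 * 53 = 49714.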
if __name__ == "__main__":
    print(solution())
| 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 0 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 360 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
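
# Usage sketch (the checkpoint id is illustrative):
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(num_inference_steps=50, eta=0.0).images[0]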
| 124 | 0 |
import math
import sys


def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """
    Decompresses given data_bits using Lempel-Ziv-Welch compression algorithm
    and returns the result as a string
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes given to_write string (should only consist of 0's and 1's) as bytes in the file
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """
    Removes the size prefix that a compressed file carries and returns the rest
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, decompresses it and writes the result to the destination file
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
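
# Example invocation (file names are illustrative):
#   python lempel_ziv_decompress.py compressed.bin decompressed.bin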
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 296 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
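
# When the package is installed, this module is exposed as the `diffusers-cli`
# console entry point; for example, `diffusers-cli env` prints environment info.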
if __name__ == "__main__":
    main()
| 296 | 1 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : Dict = 10
__magic_name__ : int = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
__magic_name__ : List[Any] = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(_snake_case ) ),
} , features=_snake_case , )
return dataset
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Tuple , _snake_case : int ) -> List[str]:
'''simple docstring'''
__magic_name__ : str = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=_snake_case )
return filename
# FILE_CONTENT + files
FILE_CONTENT = "\\n Text data.\n Second line of data."
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Union[str, Any] ) -> Any:
'''simple docstring'''
__magic_name__ : List[Any] = tmp_path_factory.mktemp("data" ) / '''file.txt'''
__magic_name__ : Union[str, Any] = FILE_CONTENT
with open(_snake_case , "w" ) as f:
f.write(_snake_case )
return filename
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Any ) -> int:
'''simple docstring'''
    import bz2
__magic_name__ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / '''file.txt.bz2'''
__magic_name__ : int = bytes(_snake_case , "utf-8" )
    with bz2.open(_snake_case , "wb" ) as f:
f.write(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
import gzip
__magic_name__ : Tuple = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
__magic_name__ : Union[str, Any] = bytes(_snake_case , "utf-8" )
with gzip.open(_snake_case , "wb" ) as f:
f.write(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
__magic_name__ : List[str] = tmp_path_factory.mktemp("data" ) / '''file.txt.lz4'''
__magic_name__ : Optional[Any] = bytes(_snake_case , "utf-8" )
        with lz4.frame.open(_snake_case , "wb" ) as f:
f.write(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : int , _snake_case : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
__magic_name__ : List[Any] = tmp_path_factory.mktemp("data" ) / '''file.txt.7z'''
        with py7zr.SevenZipFile(_snake_case , "w" ) as archive:
archive.write(_snake_case , arcname=os.path.basename(_snake_case ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : str , _snake_case : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
import tarfile
__magic_name__ : Any = tmp_path_factory.mktemp("data" ) / '''file.txt.tar'''
with tarfile.TarFile(_snake_case , "w" ) as f:
f.add(_snake_case , arcname=os.path.basename(_snake_case ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Any ) -> int:
'''simple docstring'''
import lzma
__magic_name__ : List[str] = tmp_path_factory.mktemp("data" ) / '''file.txt.xz'''
__magic_name__ : Any = bytes(_snake_case , "utf-8" )
with lzma.open(_snake_case , "wb" ) as f:
f.write(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : Optional[int] ) -> Optional[int]:
'''simple docstring'''
import zipfile
__magic_name__ : Optional[Any] = tmp_path_factory.mktemp("data" ) / '''file.txt.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.basename(_snake_case ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : str ) -> int:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__magic_name__ : List[Any] = tmp_path_factory.mktemp("data" ) / '''file.txt.zst'''
__magic_name__ : List[str] = bytes(_snake_case , "utf-8" )
with zstd.open(_snake_case , "wb" ) as f:
f.write(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Optional[int] ) -> Tuple:
'''simple docstring'''
__magic_name__ : Any = tmp_path_factory.mktemp("data" ) / '''file.xml'''
__magic_name__ : Union[str, Any] = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(_snake_case , "w" ) as f:
f.write(_snake_case )
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__magic_name__ : Any = datasets.Dataset.from_dict(_snake_case )
__magic_name__ : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=_snake_case )
return path
@pytest.fixture(scope="session" )
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Dict ) -> str:
'''simple docstring'''
__magic_name__ : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(_snake_case , "w" , newline="" ) as f:
__magic_name__ : Optional[int] = csv.DictWriter(_snake_case , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : List[Any] ) -> Tuple:
'''simple docstring'''
__magic_name__ : str = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(_snake_case , "w" , newline="" ) as f:
__magic_name__ : Optional[Any] = csv.DictWriter(_snake_case , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
    import bz2
__magic_name__ : Tuple = tmp_path_factory.mktemp("data" ) / '''dataset.csv.bz2'''
with open(_snake_case , "rb" ) as f:
__magic_name__ : str = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(_snake_case , "wb" ) as f:
f.write(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Tuple , _snake_case : str , _snake_case : List[str] ) -> str:
'''simple docstring'''
__magic_name__ : Tuple = tmp_path_factory.mktemp("data" ) / '''dataset.csv.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.basename(_snake_case ) )
f.write(_snake_case , arcname=os.path.basename(_snake_case ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Dict , _snake_case : int , _snake_case : Tuple ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : List[str] = tmp_path_factory.mktemp("data" ) / '''dataset.csv.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(_snake_case , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : int ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : Dict = tmp_path_factory.mktemp("data" ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.join("main_dir" , os.path.basename(_snake_case ) ) )
f.write(_snake_case , arcname=os.path.join("main_dir" , os.path.basename(_snake_case ) ) )
return path
@pytest.fixture(scope="session" )
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
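
# In a test that receives this fixture, the file can be sanity-checked with,
# e.g., pq.read_table(parquet_path).to_pydict()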
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Dict ) -> List[Any]:
'''simple docstring'''
__magic_name__ : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
__magic_name__ : List[Any] = {'''data''': DATA}
with open(_snake_case , "w" ) as f:
json.dump(_snake_case , _snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : List[str] ) -> Any:
'''simple docstring'''
__magic_name__ : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
__magic_name__ : Optional[int] = {'''data''': DATA_DICT_OF_LISTS}
with open(_snake_case , "w" ) as f:
json.dump(_snake_case , _snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : List[Any] ) -> Dict:
'''simple docstring'''
__magic_name__ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(_snake_case , "w" ) as f:
for item in DATA:
f.write(json.dumps(_snake_case ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : str ) -> str:
'''simple docstring'''
__magic_name__ : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(_snake_case , "w" ) as f:
for item in DATA:
f.write(json.dumps(_snake_case ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : int ) -> Any:
'''simple docstring'''
__magic_name__ : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(_snake_case , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(_snake_case ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Tuple ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(_snake_case , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(_snake_case ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Dict , _snake_case : Optional[Any] ) -> List[Any]:
'''simple docstring'''
import gzip
__magic_name__ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(_snake_case , "rb" ) as orig_file:
with gzip.open(_snake_case , "wb" ) as zipped_file:
zipped_file.writelines(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : int , _snake_case : Any ) -> Dict:
'''simple docstring'''
import gzip
__magic_name__ : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(_snake_case , "rb" ) as orig_file:
with gzip.open(_snake_case , "wb" ) as zipped_file:
zipped_file.writelines(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : int ) -> Tuple:
'''simple docstring'''
__magic_name__ : Dict = tmp_path_factory.mktemp("data" ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.basename(_snake_case ) )
f.write(_snake_case , arcname=os.path.basename(_snake_case ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : List[Any] ) -> str:
'''simple docstring'''
__magic_name__ : str = tmp_path_factory.mktemp("data" ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.join("nested" , os.path.basename(_snake_case ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : str , _snake_case : Dict , _snake_case : int ) -> int:
'''simple docstring'''
__magic_name__ : List[Any] = tmp_path_factory.mktemp("data" ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.join("main_dir" , os.path.basename(_snake_case ) ) )
f.write(_snake_case , arcname=os.path.join("main_dir" , os.path.basename(_snake_case ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Tuple ) -> Dict:
'''simple docstring'''
__magic_name__ : Any = tmp_path_factory.mktemp("data" ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(_snake_case , "w" ) as f:
f.add(_snake_case , arcname=os.path.basename(_snake_case ) )
f.add(_snake_case , arcname=os.path.basename(_snake_case ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : List[Any] ) -> str:
'''simple docstring'''
__magic_name__ : List[str] = tmp_path_factory.mktemp("data" ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(_snake_case , "w" ) as f:
f.add(_snake_case , arcname=os.path.join("nested" , os.path.basename(_snake_case ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : List[str] = ['''0''', '''1''', '''2''', '''3''']
__magic_name__ : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(_snake_case , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : List[str] ) -> Any:
'''simple docstring'''
__magic_name__ : List[str] = ['''0''', '''1''', '''2''', '''3''']
__magic_name__ : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(_snake_case , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Dict ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : List[str] = ['''0''', '''1''', '''2''', '''3''']
__magic_name__ : List[Any] = tmp_path_factory.mktemp("data" ) / '''dataset.abc'''
with open(_snake_case , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : Dict , _snake_case : List[Any] ) -> int:
'''simple docstring'''
__magic_name__ : Dict = tmp_path_factory.mktemp("data" ) / '''dataset.text.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.basename(_snake_case ) )
f.write(_snake_case , arcname=os.path.basename(_snake_case ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : int , _snake_case : Any , _snake_case : Any ) -> Tuple:
'''simple docstring'''
__magic_name__ : List[Any] = tmp_path_factory.mktemp("data" ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.join("main_dir" , os.path.basename(_snake_case ) ) )
f.write(_snake_case , arcname=os.path.join("main_dir" , os.path.basename(_snake_case ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : int , _snake_case : Optional[Any] , _snake_case : Any ) -> List[Any]:
'''simple docstring'''
__magic_name__ : int = tmp_path_factory.mktemp("data" ) / '''dataset.ext.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.basename("unsupported.ext" ) )
f.write(_snake_case , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : List[str] ) -> int:
'''simple docstring'''
__magic_name__ : int = '''\n'''.join(["First", "Second\u2029with Unicode new line", "Third"] )
__magic_name__ : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(_snake_case , "w" , encoding="utf-8" ) as f:
f.write(_snake_case )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : List[str] ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : str = tmp_path_factory.mktemp("data" ) / '''dataset.img.zip'''
with zipfile.ZipFile(_snake_case , "w" ) as f:
f.write(_snake_case , arcname=os.path.basename(_snake_case ) )
f.write(_snake_case , arcname=os.path.basename(_snake_case ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( _snake_case : Tuple ) -> Any:
'''simple docstring'''
__magic_name__ : Union[str, Any] = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
| 370 |
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but this one also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
_CITATION = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 41 | 0 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Minimum number of single-character insertions, deletions, and substitutions
    needed to turn ``word1`` into ``word2`` (Levenshtein distance), computed
    top-down with memoization.

    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("intention", "")
    9
    >>> min_distance_up_bottom("", "")
    0
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word is exhausted - insert the rest of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word is exhausted - delete the rest of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231 |
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 231 | 1 |
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
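# A minimal usage sketch (an assumption, not part of the original file):
# PipelineTool subclasses are callable, which runs encode -> forward -> decode.
# `waveform` stands for a mono 16 kHz audio array loaded elsewhere.
#
#     tool = SpeechToTextTool()
#     transcription = tool(waveform)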
| 361 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 90 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """
    >>> _validate_point([1, 2])
    >>> _validate_point(None)
    Traceback (most recent call last):
        ...
    ValueError: Missing an input
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Like ``manhattan_distance``, but written as a one-line generator expression.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 244 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing its data, parent pointer, and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint-set (union-find) data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        # Kruskal's algorithm: generate a minimum spanning tree (MST) of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
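# A minimal usage sketch (the node labels and weights are illustrative): build
# a small triangle graph and extract its MST, which should drop the heaviest
# edge.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)  # heaviest edge; Kruskal should drop it
    mst = g.kruskal()
    print(mst.connections)  # keeps the two lightest edges: (1, 2) and (2, 3)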
| 244 | 1 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Custom SentencePiece Unigram tokenizer: NMT, NFKC, whitespace and
    lower-casing normalization, with the pre-tokenization used by SentencePiece.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # patch the serialized model so the unk token id is set on the Unigram model
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
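# A minimal usage sketch (the corpus is illustrative; vocab_size must stay
# small enough for the Unigram trainer to reach it on a tiny corpus, otherwise
# tokenizers raises an error):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train_from_iterator(corpus_sentences, vocab_size=300, show_progress=False)
#     print(tokenizer.encode("Hello world!").tokens)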
| 363 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 328 | 0 |