from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
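# A short sketch of how this lazy-import pattern behaves (names as defined
# above; it is the attribute access, not the package import, that pulls in
# the heavy framework code):
#
#     from transformers.models import opt
#     opt.OPTConfig   # imports configuration_opt on first attribute access
#     opt.OPTModel    # imports modeling_opt (and hence torch) only here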
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
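# Quick sanity check of the derived attributes above (a sketch, assuming the
# reconstructed names): with the default embed_dim=96 and four stages,
#
#     config = MaskFormerSwinConfig()
#     config.hidden_size   # 96 * 2 ** 3 == 768
#     config.stage_names   # ["stem", "stage1", "stage2", "stage3", "stage4"]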
# ---------------------------------------------------------------------------
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
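# Sequence layout sketch for the methods above (XLNet puts its special tokens
# at the end, unlike BERT): a single sequence is encoded as
#     A <sep> <cls>          with token type ids  0 ... 0 2
# and a pair as
#     A <sep> B <sep> <cls>  with token type ids  0 ... 0 1 ... 1 2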
# ---------------------------------------------------------------------------
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate the training command from the provided command line arguments.

    Returns: TrainCommand
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels.")
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts.")
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids.")
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on.")
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
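# Invocation sketch (hedged: flag names as registered above, and assuming the
# standard `transformers-cli` entry point exposes this subcommand):
#
#     transformers-cli train --train_data ./train.csv --model bert-base-uncased \
#         --task text_classification --output ./trained_model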
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, do_center_crop: bool = True, image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073], image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711], do_pad: bool = True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
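# Worked example of the resize math in get_expected_values above (a sketch,
# using the defaults shortest_edge=288 and size_divisor=32): a 30x400 image is
# first scaled so its short side reaches 288, giving 288x3840; the long side
# is then capped at int(1333/800 * 288) = 479, rescaling to roughly 36x479;
# finally both sides are floored to multiples of 32, yielding 32x448.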
# ---------------------------------------------------------------------------
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
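# Example invocation (a sketch; the script filename is assumed and the
# TensorFlow checkpoint must exist locally):
#
#     python convert_transfo_xl_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./transfo-xl-pt \
#         --tf_checkpoint_path ./tf_ckpt/model.ckpt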
# ---------------------------------------------------------------------------
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient function phi(i) for 2 <= i <= limit, using a sieve:
    phi starts at i - 1 and each prime i reduces phi(j) for its multiples j."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so the sieve has not touched it yet
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
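# Small sanity check of the sieve (names as reconstructed above): solution(10)
# sums phi(2..10) = 1 + 2 + 2 + 4 + 2 + 6 + 4 + 6 + 4 = 31.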
# ---------------------------------------------------------------------------
from math import factorial


def solution(n: int = 20) -> int:
    """Return the central binomial coefficient C(2n, n), the middle entry of
    row 2n of Pascal's triangle (and the number of lattice paths through an
    n x n grid)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
# ---------------------------------------------------------------------------
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
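# Note on the two step functions exercised above: PNDM first runs a
# Runge-Kutta warm-up phase (step_prk over prk_timesteps) to build up the
# history of model outputs kept in scheduler.ets, then switches to the
# cheaper linear multistep phase (step_plms over plms_timesteps) for the
# remaining denoising steps.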
# ---------------------------------------------------------------------------
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
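# Usage sketch (assuming the reconstructed class name above): the language
# tokens drive the prefix/suffix logic in set_src_lang_special_tokens.
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     batch = tokenizer("Hello", return_tensors="pt")  # prefixed with __en__, suffixed with </s>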
# ---------------------------------------------------------------------------
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class Audio:
    """Audio feature that stores audio samples as {"bytes", "path"} structs and decodes them on access."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don't have to "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
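# Usage sketch with the datasets API (the column name and file path are
# illustrative):
#
#     from datasets import Dataset, Audio
#     ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
#     ds[0]["audio"]["array"]  # decoded, converted to mono, resampled to 16 kHz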
# ---------------------------------------------------------------------------
def sum_of_series(first_term, common_diff, num_of_terms) -> float:
    """Return the sum of an arithmetic progression via S_n = n/2 * (2a + (n - 1) * d).

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
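# The closed form used above comes from pairing terms: for a=1, d=1, n=10 the
# series is 1 + 2 + ... + 10 and S = 10/2 * (2*1 + 9*1) = 5 * 11 = 55.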
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=_snake_case ):
UpperCamelCase__ : Any =["torch", "scipy"]
def __init__( self :List[str] , *_lowercase :List[str] , **_lowercase :Union[str, Any]) -> List[Any]:
requires_backends(self , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Dict , *_lowercase :Any , **_lowercase :Dict) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Optional[Any] , *_lowercase :str , **_lowercase :Optional[Any]) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
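# Minimal analogue of the placeholder pattern above: the class stays importable even when
# torch/scipy are absent, and any attempt to actually use it fails fast with a clear
# error. This sketch is illustrative only; the real check lives in requires_backends.
class _MissingBackendPlaceholder:
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the backends {self._backends}")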
| 344 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class a_ ( _snake_case ):
UpperCamelCase__ : Any ="Wav2Vec2FeatureExtractor"
UpperCamelCase__ : Optional[Any] ="AutoTokenizer"
def __init__( self :Dict , _lowercase :Any , _lowercase :Optional[Any]) -> Any:
super().__init__(_lowercase , _lowercase)
UpperCAmelCase_ = self.feature_extractor
UpperCAmelCase_ = False
@classmethod
def __a ( cls :List[str] , _lowercase :str , **_lowercase :List[str]) -> List[Any]:
try:
return super().from_pretrained(_lowercase , **_lowercase)
except OSError:
warnings.warn(
f"Loading a tokenizer inside {cls.__name__} from a config that does not"
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , _lowercase , )
UpperCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained(_lowercase , **_lowercase)
UpperCAmelCase_ = WavaVecaCTCTokenizer.from_pretrained(_lowercase , **_lowercase)
return cls(feature_extractor=_lowercase , tokenizer=_lowercase)
def __call__( self :Tuple , *_lowercase :Optional[Any] , **_lowercase :Optional[int]) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase)
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''')
UpperCAmelCase_ = kwargs.pop('''raw_speech''')
else:
UpperCAmelCase_ = kwargs.pop('''audio''' , _lowercase)
UpperCAmelCase_ = kwargs.pop('''sampling_rate''' , _lowercase)
UpperCAmelCase_ = kwargs.pop('''text''' , _lowercase)
if len(_lowercase) > 0:
UpperCAmelCase_ = args[0]
UpperCAmelCase_ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''')
if audio is not None:
UpperCAmelCase_ = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase)
if text is not None:
UpperCAmelCase_ = self.tokenizer(_lowercase , **_lowercase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase_ = encodings['''input_ids''']
return inputs
def __a ( self :int , *_lowercase :Tuple , **_lowercase :int) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_lowercase , **_lowercase)
UpperCAmelCase_ = kwargs.pop('''input_features''' , _lowercase)
UpperCAmelCase_ = kwargs.pop('''labels''' , _lowercase)
if len(_lowercase) > 0:
UpperCAmelCase_ = args[0]
UpperCAmelCase_ = args[1:]
if input_features is not None:
UpperCAmelCase_ = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase)
if labels is not None:
UpperCAmelCase_ = self.tokenizer.pad(_lowercase , **_lowercase)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
UpperCAmelCase_ = labels['''input_ids''']
return input_features
def __a ( self :Union[str, Any] , *_lowercase :Optional[int] , **_lowercase :List[Any]) -> Tuple:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase)
def __a ( self :Dict , *_lowercase :int , **_lowercase :Optional[Any]) -> Any:
return self.tokenizer.decode(*_lowercase , **_lowercase)
@contextmanager
def __a ( self :Any) -> Tuple:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call).''')
UpperCAmelCase_ = True
UpperCAmelCase_ = self.tokenizer
yield
UpperCAmelCase_ = self.feature_extractor
UpperCAmelCase_ = False
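# Sketch of the merge performed at the end of __call__ above: when both audio and text
# are given, the tokenized ids are attached to the feature dict under "labels". Plain
# dicts stand in for the real BatchFeature/BatchEncoding objects (an assumption).
def _merge(inputs, encodings):
    if encodings is not None:
        inputs["labels"] = encodings["input_ids"]
    return inputs

assert _merge({"input_values": [0.1, 0.2]}, {"input_ids": [5, 6]})["labels"] == [5, 6]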
| 354 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def A ( __UpperCAmelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for rt in rc.restypes:
UpperCAmelCase_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCAmelCase_ = {name: i for i, name in enumerate(__UpperCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.floataa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCAmelCase_ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCAmelCase_ = rc.restype_atoa[restype_letter]
UpperCAmelCase_ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCAmelCase_ = rc.atom_order[atom_name]
UpperCAmelCase_ = 1
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
return protein
def A ( __UpperCAmelCase ) -> Dict[str, np.ndarray]:
'''simple docstring'''
UpperCAmelCase_ = tree_map(lambda __UpperCAmelCase : torch.tensor(__UpperCAmelCase , device=batch['''aatype'''].device ) , __UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = tensor_tree_map(lambda __UpperCAmelCase : np.array(__UpperCAmelCase ) , make_atomaa_masks(__UpperCAmelCase ) )
return out
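# Minimal sketch of the per-residue gather used above (sizes only; the table below
# stands in for the hypothetical (21, 14) restype -> atom-index mapping, i.e. the 20
# standard residue types plus one dummy "UNK" row):
import torch

table = torch.arange(21 * 14).reshape(21, 14)   # stand-in for the restype lookup table
aatype = torch.tensor([0, 3, 20])               # three residues, the last one "UNK"
per_res = table[aatype]                         # advanced indexing: one row per residue
assert per_res.shape == (3, 14)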
| 344 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
UpperCamelCase_ = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class a_ ( _snake_case ):
UpperCamelCase__ : List[str] =VOCAB_FILES_NAMES
UpperCamelCase__ : Dict =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : List[str] =PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Union[str, Any] =ElectraTokenizer
def __init__( self :List[Any] , _lowercase :List[str]=None , _lowercase :Any=None , _lowercase :int=True , _lowercase :Dict="[UNK]" , _lowercase :Dict="[SEP]" , _lowercase :Tuple="[PAD]" , _lowercase :Optional[int]="[CLS]" , _lowercase :List[Any]="[MASK]" , _lowercase :List[Any]=True , _lowercase :Any=None , **_lowercase :Any , ) -> List[str]:
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' , _lowercase) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(_lowercase , normalizer_state.pop('''type'''))
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**_lowercase)
UpperCAmelCase_ = do_lower_case
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :Optional[int]=None) -> Union[str, Any]:
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self :List[str] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def __a ( self :Optional[int] , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
UpperCAmelCase_ = self._tokenizer.model.save(_lowercase , name=_lowercase)
return tuple(_lowercase)
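# Sketch of the special-token layout the two builders above produce. The ids 101/102
# are the common WordPiece defaults for [CLS]/[SEP]; they are assumptions here, not
# values read from this tokenizer.
cls_id, sep_id = 101, 102
ids_a, ids_b = [7, 8, 9], [11, 12]
single = [cls_id] + ids_a + [sep_id]                   # [CLS] A [SEP]
pair = single + ids_b + [sep_id]                       # [CLS] A [SEP] B [SEP]
type_ids = [0] * len(single) + [1] * (len(ids_b) + 1)  # segment 0 for A, segment 1 for B
assert len(pair) == len(type_ids)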
| 355 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="openai/whisper-base"
UpperCamelCase__ : int =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
UpperCamelCase__ : Any ="transcriber"
UpperCamelCase__ : Optional[int] =WhisperProcessor
UpperCamelCase__ : List[str] =WhisperForConditionalGeneration
UpperCamelCase__ : List[Any] =["audio"]
UpperCamelCase__ : Union[str, Any] =["text"]
def __a ( self :int , _lowercase :Any) -> Tuple:
return self.pre_processor(_lowercase , return_tensors='''pt''').input_features
def __a ( self :Dict , _lowercase :Tuple) -> Any:
return self.model.generate(inputs=_lowercase)
def __a ( self :int , _lowercase :Union[str, Any]) -> Optional[Any]:
return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=_lowercase)[0]
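# Minimal analogue of the encode / forward / decode pipeline the tool above follows
# (the real methods are name-mangled to __a in this dump; this toy class illustrates
# only the three-stage shape of PipelineTool, not Whisper itself):
class _EchoTool:
    def encode(self, raw):
        return raw.lower()

    def forward(self, enc):
        return enc.split()

    def decode(self, out):
        return " ".join(out)

tool = _EchoTool()
assert tool.decode(tool.forward(tool.encode("Hello World"))) == "hello world"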
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
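# Self-contained sketch of the lazy-import idea behind _LazyModule: resolve an attribute
# to its providing module only on first access, via a PEP 562 module-level __getattr__.
# The mapping below is illustrative and not the real machinery.
import importlib

_LAZY = {"sqrt": "math", "dumps": "json"}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")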
| 356 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A ( ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = ArgumentParser(
description=(
            '''PyTorch TPU distributed training launch helper utility that will spawn multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__UpperCAmelCase , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__UpperCAmelCase , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__UpperCAmelCase )
return parser.parse_args()
def A ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = parse_args()
# Import training_script as a module.
UpperCAmelCase_ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCAmelCase_ = script_fpath.stem
UpperCAmelCase_ = importlib.import_module(__UpperCAmelCase )
# Patch sys.argv
UpperCAmelCase_ = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
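# Typical invocation of the launcher above (script and argument names are illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
# Everything after the training-script path is forwarded to it unchanged, and
# --tpu_num_cores is appended before xmp.spawn forks one process per TPU core.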
| 357 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : str =BigBirdTokenizer
UpperCamelCase__ : Tuple =BigBirdTokenizerFast
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : List[str] =True
def __a ( self :Any) -> List[str]:
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_lowercase , keep_accents=_lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __a ( self :Any) -> List[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 344 | 0 |
UpperCamelCase_ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
UpperCamelCase_ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list[int]:
'''simple docstring'''
UpperCAmelCase_ = True
UpperCAmelCase_ = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
order.append(__UpperCAmelCase )
return order
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list[int]:
'''simple docstring'''
UpperCAmelCase_ = True
UpperCAmelCase_ = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return component
def A ( __UpperCAmelCase ) -> list[list[int]]:
'''simple docstring'''
UpperCAmelCase_ = len(__UpperCAmelCase ) * [False]
UpperCAmelCase_ = {vert: [] for vert in range(len(__UpperCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__UpperCAmelCase )
UpperCAmelCase_ = []
for i, was_visited in enumerate(__UpperCAmelCase ):
if not was_visited:
order += topology_sort(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_ = []
UpperCAmelCase_ = len(__UpperCAmelCase ) * [False]
for i in range(len(__UpperCAmelCase ) ):
UpperCAmelCase_ = order[len(__UpperCAmelCase ) - i - 1]
if not visited[vert]:
UpperCAmelCase_ = find_components(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
components_list.append(__UpperCAmelCase )
return components_list
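# Worked example on the second graph above: 0 -> 1 -> 2 -> 0 and 3 -> 4 -> 5 -> 3 are
# the two cycles, so the expected decomposition is [[0, 1, 2], [3, 4, 5]] up to ordering
# within and between components. Names in this dump are mangled, so the call below is
# written against the original function name as an assumption:
#   strongly_connected_components(test_graph_2) -> [[0, 1, 2], [3, 4, 5]]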
| 358 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase_ = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase_ = {ord(char) for char in VALID_CHARS}
UpperCamelCase_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> str | None:
'''simple docstring'''
UpperCAmelCase_ = ""
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
for keychar, cipherchar in zip(cycle(__UpperCAmelCase ) , __UpperCAmelCase ):
UpperCAmelCase_ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__UpperCAmelCase )
return decoded
def A ( __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
UpperCAmelCase_ = []
for key in product(__UpperCAmelCase , repeat=3 ):
UpperCAmelCase_ = try_key(__UpperCAmelCase , __UpperCAmelCase )
if encoded is not None:
possibles.append(__UpperCAmelCase )
return possibles
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def A ( __UpperCAmelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = Path(__UpperCAmelCase ).parent.joinpath(__UpperCAmelCase ).read_text(encoding='''utf-8''' )
UpperCAmelCase_ = [int(__UpperCAmelCase ) for number in data.strip().split(''',''' )]
UpperCAmelCase_ = filter_valid_chars(__UpperCAmelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase_ = filter_common_word(__UpperCAmelCase , __UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
break
UpperCAmelCase_ = possibles[0]
return sum(ord(__UpperCAmelCase ) for char in decoded_text )
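# Key-space sanity check for the brute force above: the key is three lowercase letters,
# so only 26 ** 3 = 17_576 candidates need to be XOR-tested.
assert 26 ** 3 == 17_576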
if __name__ == "__main__":
print(f"{solution() = }")
| 344 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =StableDiffusionInstructPixaPixPipeline
UpperCamelCase__ : List[str] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
UpperCamelCase__ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase__ : int =IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ : Optional[int] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self :List[Any]) -> Any:
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase_ = PNDMScheduler(skip_prk_steps=_lowercase)
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ = CLIPTextModel(_lowercase)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __a ( self :Tuple , _lowercase :Optional[int] , _lowercase :int=0) -> Union[str, Any]:
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_lowercase)).convert('''RGB''')
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :List[Any]) -> Any:
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline(**_lowercase)
UpperCAmelCase_ = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_inputs(_lowercase)
UpperCAmelCase_ = sd_pipe(**_lowercase).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def __a ( self :Optional[int]) -> int:
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline(**_lowercase)
UpperCAmelCase_ = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_inputs(_lowercase)
UpperCAmelCase_ = '''french fries'''
UpperCAmelCase_ = sd_pipe(**_lowercase , negative_prompt=_lowercase)
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def __a ( self :List[str]) -> Union[str, Any]:
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline(**_lowercase)
UpperCAmelCase_ = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_inputs(_lowercase)
UpperCAmelCase_ = [inputs['''prompt''']] * 2
UpperCAmelCase_ = np.array(inputs['''image''']).astype(np.floataa) / 255.0
UpperCAmelCase_ = torch.from_numpy(_lowercase).unsqueeze(0).to(_lowercase)
UpperCAmelCase_ = image / 2 + 0.5
UpperCAmelCase_ = image.permute(0 , 3 , 1 , 2)
UpperCAmelCase_ = image.repeat(2 , 1 , 1 , 1)
UpperCAmelCase_ = sd_pipe(**_lowercase).images
UpperCAmelCase_ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
UpperCAmelCase_ = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''')
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline(**_lowercase)
UpperCAmelCase_ = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_inputs(_lowercase)
UpperCAmelCase_ = sd_pipe(**_lowercase).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = [round(_lowercase , 4) for x in image_slice.flatten().tolist()]
print(''','''.join([str(_lowercase) for x in slice]))
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def __a ( self :Dict) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
def __a ( self :Dict) -> int:
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline(**_lowercase)
UpperCAmelCase_ = VaeImageProcessor(do_resize=_lowercase , do_normalize=_lowercase)
UpperCAmelCase_ = pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = pipe(**self.get_dummy_inputs_by_type(_lowercase , input_image_type='''pt'''))[0]
UpperCAmelCase_ = components['''vae''']
UpperCAmelCase_ = self.get_dummy_inputs_by_type(_lowercase , input_image_type='''pt''')
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCAmelCase_ = vae.encode(inputs[image_param]).latent_dist.mode()
UpperCAmelCase_ = pipe(**_lowercase)[0]
UpperCAmelCase_ = np.abs(out - out_latents_inputs).max()
self.assertLess(_lowercase , 1E-4 , '''passing latents as image input generate different result from passing image''')
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def __a ( self :Optional[int]) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[str] , _lowercase :List[str]=0) -> Optional[Any]:
UpperCAmelCase_ = torch.manual_seed(_lowercase)
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''')
UpperCAmelCase_ = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :List[str]) -> Union[str, Any]:
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
pipe.enable_attention_slicing()
UpperCAmelCase_ = self.get_inputs()
UpperCAmelCase_ = pipe(**_lowercase).images
UpperCAmelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def __a ( self :Dict) -> str:
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_lowercase)
UpperCAmelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
pipe.enable_attention_slicing()
UpperCAmelCase_ = self.get_inputs()
UpperCAmelCase_ = pipe(**_lowercase).images
UpperCAmelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_lowercase)
UpperCAmelCase_ = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
pipe.enable_attention_slicing()
UpperCAmelCase_ = self.get_inputs()
UpperCAmelCase_ = pipe(**_lowercase).images
UpperCAmelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def __a ( self :Tuple) -> str:
UpperCAmelCase_ = 0
def callback_fn(_lowercase :int , _lowercase :int , _lowercase :torch.FloatTensor) -> None:
UpperCAmelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ = latents[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
elif step == 2:
UpperCAmelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_ = latents[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
UpperCAmelCase_ = False
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_lowercase , torch_dtype=torch.floataa)
UpperCAmelCase_ = pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
pipe.enable_attention_slicing()
UpperCAmelCase_ = self.get_inputs()
pipe(**_lowercase , callback=_lowercase , callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self :int) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_lowercase , torch_dtype=torch.floataa)
UpperCAmelCase_ = pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = self.get_inputs()
UpperCAmelCase_ = pipe(**_lowercase)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __a ( self :Any) -> Optional[int]:
UpperCAmelCase_ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase_ = inputs['''image'''].resize((504, 504))
UpperCAmelCase_ = '''timbrooks/instruct-pix2pix'''
UpperCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , )
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
pipe.enable_attention_slicing()
UpperCAmelCase_ = pipe(**_lowercase)
UpperCAmelCase_ = output.images[0]
UpperCAmelCase_ = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
UpperCAmelCase_ = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
| 359 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def A ( ) -> str:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def A ( ) -> Any:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = dataset_loading_script_name
UpperCAmelCase_ = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=__UpperCAmelCase )
UpperCAmelCase_ = script_dir / f"{script_name}.py"
with open(__UpperCAmelCase , '''w''' ) as f:
f.write(__UpperCAmelCase )
return str(__UpperCAmelCase )
| 344 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> Dict:
UpperCAmelCase_ = get_activation('''swish''')
self.assertIsInstance(_lowercase , nn.SiLU)
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
def __a ( self :List[Any]) -> List[Any]:
UpperCAmelCase_ = get_activation('''silu''')
self.assertIsInstance(_lowercase , nn.SiLU)
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
def __a ( self :str) -> Any:
UpperCAmelCase_ = get_activation('''mish''')
self.assertIsInstance(_lowercase , nn.Mish)
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = get_activation('''gelu''')
self.assertIsInstance(_lowercase , nn.GELU)
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
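# Usage sketch for the helper under test (get_activation is imported above from
# diffusers.models.activations; the tensor sizes are arbitrary):
import torch
from diffusers.models.activations import get_activation

act = get_activation("gelu")   # returns an nn.Module (nn.GELU, per the test above)
out = act(torch.randn(2, 3))   # applied elementwise
assert out.shape == (2, 3)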
| 360 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="open-llama"
def __init__( self :Union[str, Any] , _lowercase :List[Any]=100000 , _lowercase :Dict=4096 , _lowercase :List[Any]=11008 , _lowercase :Optional[int]=32 , _lowercase :Union[str, Any]=32 , _lowercase :List[str]="silu" , _lowercase :Union[str, Any]=2048 , _lowercase :Any=0.02 , _lowercase :Optional[Any]=1E-6 , _lowercase :str=True , _lowercase :str=0 , _lowercase :Any=1 , _lowercase :Optional[Any]=2 , _lowercase :str=False , _lowercase :Dict=True , _lowercase :Optional[Any]=0.1 , _lowercase :Tuple=0.1 , _lowercase :Dict=True , _lowercase :List[Any]=True , _lowercase :Dict=None , **_lowercase :Optional[int] , ) -> List[Any]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = kwargs.pop(
'''use_memorry_efficient_attention''' , _lowercase)
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_dropout_prob
UpperCAmelCase_ = use_stable_embedding
UpperCAmelCase_ = shared_input_output_embedding
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase , )
def __a ( self :int) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowercase) or len(self.rope_scaling) != 2:
raise ValueError(
            '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}")
UpperCAmelCase_ = self.rope_scaling.get('''type''' , _lowercase)
UpperCAmelCase_ = self.rope_scaling.get('''factor''' , _lowercase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(_lowercase , _lowercase) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 344 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def A ( __UpperCAmelCase = 3 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('''number of qubits must be a integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(__UpperCAmelCase ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
UpperCAmelCase_ = QuantumRegister(__UpperCAmelCase , '''qr''' )
UpperCAmelCase_ = ClassicalRegister(__UpperCAmelCase , '''cr''' )
UpperCAmelCase_ = QuantumCircuit(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_ = number_of_qubits
for i in range(__UpperCAmelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(__UpperCAmelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , __UpperCAmelCase , __UpperCAmelCase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(__UpperCAmelCase , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(__UpperCAmelCase , __UpperCAmelCase )
# simulate with 10000 shots
UpperCAmelCase_ = Aer.get_backend('''qasm_simulator''' )
UpperCAmelCase_ = execute(__UpperCAmelCase , __UpperCAmelCase , shots=1_0000 )
return job.result().get_counts(__UpperCAmelCase )
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
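# Sanity note for the demo above: the circuit starts from |000>, and the QFT of |00...0>
# is the uniform superposition, so each of the 2 ** 3 = 8 bitstrings should appear in
# roughly 10000 / 8 = 1250 of the 10_000 shots, up to sampling noise.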
| 361 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] =(DPMSolverSinglestepScheduler,)
UpperCamelCase__ : Tuple =(("num_inference_steps", 25),)
def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_lowercase)
return config
def __a ( self :Union[str, Any] , _lowercase :List[Any]=0 , **_lowercase :Optional[int]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
def __a ( self :Optional[Any] , _lowercase :str=0 , **_lowercase :Union[str, Any]) -> Dict:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Dict , _lowercase :Union[str, Any]=None , **_lowercase :List[Any]) -> int:
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
assert sample.dtype == torch.float16
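# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the test class above), assuming the
# `diffusers` package is installed. It shows the sampling loop the tests
# exercise, with a zero tensor standing in for a real noise-prediction model,
# plus the config interchange checked in the scheduler-switching test.
import torch
from diffusers import DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8)  # start from pure noise
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
# Schedulers sharing a config schema can be rebuilt from one another's config:
multistep = DPMSolverMultistepScheduler.from_config(scheduler.config)
assert multistep.config.num_train_timesteps == scheduler.config.num_train_timesteps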
| 344 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : str =XLNetTokenizer
UpperCamelCase__ : List[str] =XLNetTokenizerFast
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : Dict =True
def __a ( self :List[str]) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = XLNetTokenizer(_lowercase , keep_accents=_lowercase)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[Any]) -> Union[str, Any]:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> Dict:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''<eod>''')
self.assertEqual(len(_lowercase) , 1006)
def __a ( self :Union[str, Any]) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :int) -> List[str]:
UpperCAmelCase_ = XLNetTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382])
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __a ( self :Tuple) -> List[str]:
UpperCAmelCase_ = XLNetTokenizer(_lowercase , do_lower_case=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''▁he''', '''ll''', '''o'''])
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = XLNetTokenizer(_lowercase , do_lower_case=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
def __a ( self :Any) -> str:
UpperCAmelCase_ = XLNetTokenizer.from_pretrained('''xlnet-base-cased''')
UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowercase)
UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowercase)
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_lowercase)
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def __a ( self :str) -> int:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
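# ---------------------------------------------------------------------------
# Illustrative sketch of the behaviour tested above. SAMPLE_VOCAB is a
# hypothetical path to the SentencePiece fixture loaded in setUp: pieces carry
# SPIECE_UNDERLINE ("▁") at word starts, and characters missing from the
# 1000-piece vocab map to <unk>, which is why "9" and "é" round-trip to
# "<unk>" in the tests.
from transformers import XLNetTokenizer

SAMPLE_VOCAB = "fixtures/test_sentencepiece.model"  # hypothetical local path
tok = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
pieces = tok.tokenize("This is a test")
ids = tok.convert_tokens_to_ids(pieces)
assert tok.convert_ids_to_tokens(ids) == pieces  # these pieces are all in-vocab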
| 362 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a_ ( nn.Module ):
def __init__( self :Optional[Any]) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNorm1d(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :Dict , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( _snake_case ):
def __a ( self :Tuple , _lowercase :Optional[int] , *_lowercase :Union[str, Any] , **_lowercase :Any) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class a_ ( _snake_case ):
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :Tuple) -> int:
return output + 1
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will offload each submodule's weights and set its execution device
hook_kwargs = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
hook_kwargs = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will offload each submodule's weights and set its execution device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will offload each submodule's weights and set its execution device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
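# ---------------------------------------------------------------------------
# Minimal sketch of writing a custom hook against the same API the tests use:
# `pre_forward` may rewrite the inputs and `post_forward` the output, while
# `add_hook_to_module` wires the hook in without changing forward's signature.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module

class ScaleOutputHook(ModelHook):
    def post_forward(self, module, output):
        return output * 2  # double whatever the module returns

layer = nn.Linear(3, 3)
add_hook_to_module(layer, ScaleOutputHook())
scaled = layer(torch.randn(2, 3))  # output is transparently post-processed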
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_bert"] = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_bert"] = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_bert"] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
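# ---------------------------------------------------------------------------
# Minimal sketch of the deferred-import mechanism used above, with a toy
# mapping (the real one is the _import_structure built at the top of this
# module): a _LazyModule only imports a submodule when one of its listed
# attributes is first accessed, keeping `import transformers` cheap.
from transformers.utils import _LazyModule

_demo_structure = {"tokenization_bert": ["BertTokenizer"]}  # toy subset
_demo_module = _LazyModule("transformers.models.bert", globals()["__file__"], _demo_structure)
print(_demo_module.__all__)  # nothing is imported until an attribute is used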
| 363 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = GPT2Tokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
UpperCAmelCase_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
UpperCAmelCase_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase)
processor.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , **_lowercase :Dict) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).tokenizer
def __a ( self :Optional[Any] , **_lowercase :Optional[Any]) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).image_processor
def __a ( self :Dict , **_lowercase :Tuple) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).qformer_tokenizer
def __a ( self :Optional[int]) -> str:
shutil.rmtree(self.tmpdirname)
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
self.assertIsInstance(processor.qformer_tokenizer , _lowercase)
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase)
UpperCAmelCase_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
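# ---------------------------------------------------------------------------
# Usage sketch of the composite processor tested above. `saved_dir` is a
# hypothetical directory produced by processor.save_pretrained (like
# tmpdirname in setUp). One call tokenizes the text twice, once for the
# language model and once for the Q-Former, whose keys get the "qformer_"
# prefix, and turns the image into pixel_values.
import numpy as np
from PIL import Image
from transformers import InstructBlipProcessor

saved_dir = "path/to/saved/processor"  # hypothetical, see setUp above
processor = InstructBlipProcessor.from_pretrained(saved_dir)
image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
inputs = processor(text="describe the image", images=image, return_tensors="pt")
print(sorted(inputs.keys()))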
| 344 | 0 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class a_ ( _snake_case ):
pass
def gen ( shards ):
'''Yield NUM_ITEMS_PER_SHARD examples {"i": i, "shard": shard} per shard name.'''
for shard in shards:
for i in range(NUM_ITEMS_PER_SHARD ):
yield {"i": i, "shard": shard}
def main ( ) -> None:
'''Check that split_dataset_by_node gives this rank the expected number of examples.'''
rank = int(os.environ['''RANK'''] )
world_size = int(os.environ['''WORLD_SIZE'''] )
parser = ArgumentParser()
parser.add_argument('''--streaming''' , type=bool )
parser.add_argument('''--local_rank''' , type=int )
parser.add_argument('''--num_workers''' , type=int , default=0 )
args = parser.parse_args()
streaming = args.streaming
num_workers = args.num_workers
gen_kwargs = {'''shards''': [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS )]}
ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
if not streaming:
ds = Dataset.from_list(list(ds ) )
ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
expected_local_size = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
local_size = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
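# ---------------------------------------------------------------------------
# Minimal single-process sketch of the sharding rule this script checks: every
# rank keeps len(dataset) // world_size examples, and the first
# len(dataset) % world_size ranks keep one extra, so the shards partition the
# dataset exactly.
_demo = Dataset.from_dict({"i": list(range(10))})
_demo_shard = split_dataset_by_node(_demo, rank=0, world_size=3)
assert len(_demo_shard) == 4  # 10 // 3 == 3, plus one extra since 0 < 10 % 3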
| 364 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] ="levit"
def __init__( self :List[str] , _lowercase :List[Any]=224 , _lowercase :str=3 , _lowercase :Optional[int]=3 , _lowercase :str=2 , _lowercase :List[Any]=1 , _lowercase :str=16 , _lowercase :Dict=[128, 256, 384] , _lowercase :Union[str, Any]=[4, 8, 12] , _lowercase :Tuple=[4, 4, 4] , _lowercase :Dict=[16, 16, 16] , _lowercase :Any=0 , _lowercase :Dict=[2, 2, 2] , _lowercase :Any=[2, 2, 2] , _lowercase :Tuple=0.02 , **_lowercase :Union[str, Any] , ) -> Optional[Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] =version.parse("1.11" )
@property
def __a ( self :Any) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def __a ( self :List[Any]) -> float:
return 1E-4
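# ---------------------------------------------------------------------------
# Short usage sketch (assuming `transformers` exposes this class as
# LevitConfig): fields mirror the constructor arguments above, and the ONNX
# config advertises a single NCHW pixel_values input with a 1e-4 validation
# tolerance.
from transformers import LevitConfig

config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384])
assert config.model_type == "levit"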
| 344 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
destination_vertex : int
weight : int
class AdjacencyList :
def __init__( self , size :int) -> None:
self._graph = [[] for _ in range(size)]
self._size = size
def __getitem__( self , vertex :int) -> Iterator[Edge]:
return iter(self._graph[vertex])
@property
def size( self) -> int:
return self._size
def add_edge( self , from_vertex :int , to_vertex :int , weight :int) -> None:
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''')
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''')
self._graph[from_vertex].append(Edge(to_vertex , weight))
def get_shortest_path( self , start_vertex :int , finish_vertex :int) -> int | None:
queue = deque([start_vertex])
distances: list[int | None] = [None] * self.size
distances[start_vertex] = 0
while queue:
current_vertex = queue.popleft()
current_distance = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
new_distance = current_distance + edge.weight
dest_vertex_distance = distances[edge.destination_vertex]
if (
isinstance(dest_vertex_distance , int)
and new_distance >= dest_vertex_distance
):
continue
distances[edge.destination_vertex] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex)
else:
queue.append(edge.destination_vertex)
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''')
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
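# ---------------------------------------------------------------------------
# Usage sketch of the 0-1 BFS above: relaxing a weight-0 edge pushes the
# vertex to the front of the deque and a weight-1 edge to the back, so
# vertices come off the deque in nondecreasing distance order, which is
# Dijkstra's guarantee without a heap.
g = AdjacencyList(3)
g.add_edge(0 , 1 , 0)
g.add_edge(1 , 2 , 1)
g.add_edge(0 , 2 , 1)
print(g.get_shortest_path(0 , 2))  # 1 (the direct edge ties the 0 -> 1 -> 2 route)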
| 365 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher ( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ) -> None:
'''Launch `function` from a notebook, on TPU cores, multiple GPUs, or a single CPU/GPU/MPS device.'''
in_colab = False
in_kaggle = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
in_kaggle = True
elif "IPython" in sys.modules:
in_colab = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
mixed_precision = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
num_processes = 8
launcher = PrepareForLaunch(function , distributed_type='''TPU''' )
print(f"Launching a training on {num_processes} TPU cores." )
xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*args )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=num_processes , master_addr='''127.0.0.1''' , master_port=use_port , mixed_precision=mixed_precision ):
launcher = PrepareForLaunch(function , distributed_type='''MULTI_GPU''' )
print(f"Launching training on {num_processes} GPUs." )
try:
start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
os.environ['''PYTORCH_ENABLE_MPS_FALLBACK'''] = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*args )
def debug_launcher ( function , args=() , num_processes=2 ) -> None:
'''Launch `function` on `num_processes` CPU processes, which is useful for debugging distributed code.'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=num_processes , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
launcher = PrepareForLaunch(function , debug=True )
start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
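# ---------------------------------------------------------------------------
# Usage sketch for the launchers above: the training function creates its own
# Accelerator and receives `args` positionally. The launch call is commented
# out because it only makes sense inside a live notebook session.
def training_loop(learning_rate):
    from accelerate import Accelerator
    accelerator = Accelerator()
    accelerator.print(f"process {accelerator.process_index}: lr={learning_rate}")

# notebook_launcher(training_loop, args=(1e-3,), num_processes=2)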
| 344 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker ( seq , size ) -> Generator[tuple[str, ...], None, None]:
'''Yield successive `size`-length chunks of `seq` as tuples.'''
it = iter(seq )
while True:
chunk = tuple(itertools.islice(it , size ) )
if not chunk:
return
yield chunk
def prepare_input ( dirty ) -> str:
'''Uppercase and strip non-letters, splitting doubled letters with "X" and padding odd length with "X".'''
dirty = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
clean = ''''''
if len(dirty ) < 2:
return dirty
for i in range(len(dirty ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(clean ) & 1:
clean += "X"
return clean
def generate_table ( key ) -> list[str]:
'''Build the 5x5 Playfair key table (I and J share a cell) from `key`.'''
alphabet = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
table = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(char )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(char )
return table
def encode ( plaintext , key ) -> str:
'''Encrypt `plaintext` with the Playfair cipher under `key`.'''
table = generate_table(key )
clean = prepare_input(plaintext )
ciphertext = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for char1, char2 in chunker(clean , 2 ):
row1 , col1 = divmod(table.index(char1 ) , 5 )
row2 , col2 = divmod(table.index(char2 ) , 5 )
if row1 == row2:
ciphertext += table[row1 * 5 + (col1 + 1) % 5]
ciphertext += table[row2 * 5 + (col2 + 1) % 5]
elif col1 == col2:
ciphertext += table[((row1 + 1) % 5) * 5 + col1]
ciphertext += table[((row2 + 1) % 5) * 5 + col2]
else: # rectangle
ciphertext += table[row1 * 5 + col2]
ciphertext += table[row2 * 5 + col1]
return ciphertext
def decode ( ciphertext , key ) -> str:
'''Decrypt `ciphertext` with the Playfair cipher under `key`.'''
table = generate_table(key )
plaintext = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for char1, char2 in chunker(ciphertext , 2 ):
row1 , col1 = divmod(table.index(char1 ) , 5 )
row2 , col2 = divmod(table.index(char2 ) , 5 )
if row1 == row2:
plaintext += table[row1 * 5 + (col1 - 1) % 5]
plaintext += table[row2 * 5 + (col2 - 1) % 5]
elif col1 == col2:
plaintext += table[((row1 - 1) % 5) * 5 + col1]
plaintext += table[((row2 - 1) % 5) * 5 + col2]
else: # rectangle
plaintext += table[row1 * 5 + col2]
plaintext += table[row2 * 5 + col1]
return plaintext
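# ---------------------------------------------------------------------------
# Round-trip sketch: decoding returns the padded uppercase form produced by
# prepare_input (doubled letters split with "X", odd length padded with "X"),
# not the original spacing and case.
ciphertext = encode('''hide the gold''' , '''secret key''')
assert decode(ciphertext , '''secret key''') == prepare_input('''hide the gold''')
print(ciphertext)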
| 366 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( _snake_case ):
def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any) -> int:
self.run_seqaseq_quick(distributed=_lowercase)
@require_torch_multi_gpu
def __a ( self :int) -> Any:
self.run_seqaseq_quick(distributed=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def __a ( self :str , _lowercase :Any) -> List[str]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''])
UpperCAmelCase_ = len(re.findall(_lowercase , cl.err))
self.assertEqual(_lowercase , data['''n_matches'''])
@slow
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(_lowercase)
UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def __a ( self :List[str]) -> str:
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim :str) -> Tuple[int, int, float]:
            extra_args = '''--skip_memory_metrics 0'''
            output_dir = self.run_trainer(
                max_len=128 , model_name='''sshleifer/student_marian_en_ro_6_1''' , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , '''trainer_state.json''')).log_history
            gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
            loss = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb 25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
        self.assertEqual(
            loss_orig , loss_bnb , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
    def run_trainer( self :Any , max_len :int , model_name :str , num_train_epochs :int , learning_rate :float = 3E-3 , optim :str = "adafactor" , distributed :bool = False , extra_args_str :str = None , eval_steps :int = 0 , predict_with_generate :bool = True , do_train :bool = True , do_eval :bool = True , do_predict :bool = True , n_gpus_to_use :int = None , ) -> List[Any]:
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(num_train_epochs)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(eval_steps)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
        args_eval = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(eval_steps)}\n ".split()
        args_predict = '''
        --do_predict
        '''.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env())
        else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs):
                main()
        return output_dir
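    # Launch sketch: with distributed=True the helper above shells out via
    # `python -m torch.distributed.run` so each sub-test runs in a fresh process;
    # with distributed=False it patches sys.argv and calls run_translation's main() in-process.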
| 344 | 0 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class a_ :
    decode : bool =True
    id : Optional[str] =None
    # Automatically constructed
    dtype : ClassVar[str] ="PIL.Image.Image"
    pa_type : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type : str =field(default="Image" , init=False , repr=False )
def __call__( self :Optional[int]) -> Dict:
return self.pa_type
    def encode_example( self :str , value :Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support encoding images, please install \'Pillow\'.''')
        if isinstance(value , list):
            value = np.array(value)
        if isinstance(value , str):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value , PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('''path''')}
        elif value.get('''bytes''') is not None or value.get('''path''') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
    def decode_example( self :List[Any] , value :dict , token_per_repo_id :Any=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''')
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support decoding images, please install \'Pillow\'.''')
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path , bytes_ = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('''::''')[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL)['''repo_id''']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , '''rb''' , use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
def __a ( self :str) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
)
    def cast_storage( self :int , storage :Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('''bytes''') >= 0:
                bytes_array = storage.field('''bytes''')
            else:
                bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            if storage.type.get_field_index('''path''') >= 0:
                path_array = storage.field('''path''')
            else:
                path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
        return array_cast(storage , self.pa_type)
    def embed_storage( self :List[Any] , storage :pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path :List[Any]):
            with xopen(path , '''rb''') as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
        return array_cast(storage , self.pa_type)
def list_image_compression_formats( ) -> List[str]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes( image ) -> bytes:
    '''Convert a PIL Image object to bytes using a lossless compression format (PNG or TIFF).'''
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(buffer , format=format )
    return buffer.getvalue()
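# Round-trip sketch (an assumed example, not part of the library): an in-memory RGB image
# has no source format, so it falls back to PNG:
#
#   img = PIL.Image.new("RGB", (4, 4))
#   data = image_to_bytes(img)
#   assert PIL.Image.open(BytesIO(data)).format == "PNG"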
def encode_pil_image( image ) -> dict:
    '''simple docstring'''
    if hasattr(image , '''filename''' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array( array ) -> dict:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('''|u1''' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}" )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts( objs ) -> List[dict]:
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    if objs:
        _ , obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
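# Usage sketch (hypothetical values): a small 8-bit grayscale array already matches a valid
# image dtype, so it is encoded to lossless bytes with no path:
#
#   encoded = encode_np_array(np.zeros((16, 16), dtype="|u1"))
#   assert encoded["path"] is None and isinstance(encoded["bytes"], bytes)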
| 367 |
import functools
def A ( worda , wordb ) -> int:
    '''Compute the minimum edit (Levenshtein) distance between two words.'''
    len_worda = len(worda )
    len_wordb = len(wordb )
    @functools.cache
    def min_distance(indexa , indexb ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
    return min_distance(0 , 0 )
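# Example: the edit distance between "kitten" and "sitting" is 3
# (substitute k->s, substitute e->i, append g):
#
#   assert A("kitten", "sitting") == 3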
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( PoolFormerImageProcessor ):
def __init__( self :Union[str, Any] , *_lowercase :str , **_lowercase :Any) -> None:
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' , FutureWarning , )
super().__init__(*_lowercase , **_lowercase)
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class a_ ( PreTrainedTokenizer ):
    vocab_files_names =VOCAB_FILES_NAMES
    pretrained_vocab_files_map =PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side ="left"
    def __init__( self :Optional[int] , vocab_file :Union[str, Any] , do_lower_case :Union[str, Any]=False , remove_space :Optional[int]=True , keep_accents :Union[str, Any]=False , bos_token :Tuple="<s>" , eos_token :Any="</s>" , unk_token :Dict="<unk>" , sep_token :str="<sep>" , pad_token :Tuple="<pad>" , cls_token :Any="<cls>" , mask_token :List[str]="<mask>" , additional_special_tokens :Union[str, Any]=["<eop>", "<eod>"] , sp_model_kwargs :Optional[Dict[str, Any]] = None , **kwargs :Union[str, Any] , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size( self :int) -> List[Any]:
        return len(self.sp_model)
    def get_vocab( self :Optional[int]) -> List[Any]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self :Dict) -> Union[str, Any]:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self :Optional[Any] , d :Optional[Any]) -> List[Any]:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text( self :List[str] , inputs :Tuple) -> Optional[int]:
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self :str , text :str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text , out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id( self :Optional[Any] , token :Union[str, Any]) -> Tuple:
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self :Optional[int] , index :Optional[Any]) -> List[str]:
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self :List[Any] , tokens :Optional[Any]) -> int:
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE , ''' ''').strip()
        return out_string
    def _decode( self :Union[str, Any] , token_ids :List[int] , skip_special_tokens :bool = False , clean_up_tokenization_spaces :bool = None , spaces_between_special_tokens :bool = True , **kwargs :Tuple , ) -> str:
        kwargs.pop('''use_source_tokenizer''' , None)
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''''''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self :str , token_ids_a :List[int] , token_ids_b :Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls
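    # Layout sketch: XLNet appends its special tokens, so a single sequence becomes
    # "X <sep> <cls>" and a pair becomes "A <sep> B <sep> <cls>".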
    def get_special_tokens_mask( self :Dict , token_ids_a :List[int] , token_ids_b :Optional[List[int]] = None , already_has_special_tokens :bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True)
        if token_ids_b is not None:
            return ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b)) + [1, 1]
        return ([0] * len(token_ids_a)) + [1, 1]
    def create_token_type_ids_from_sequences( self :Optional[int] , token_ids_a :List[int] , token_ids_b :Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1] + cls_segment_id
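    # Layout sketch: segment ids follow the same tail-heavy layout, e.g. a pair
    # "A <sep> B <sep> <cls>" maps to [0, ..., 0, 1, ..., 1, 2], where 2 marks the CLS segment.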
    def save_vocabulary( self :str , save_directory :str , filename_prefix :Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 344 | 0 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class a_ ( BaseOutput ):
    images : Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( BackboneConfigMixin , PretrainedConfig ):
UpperCamelCase__ : Union[str, Any] ="maskformer-swin"
UpperCamelCase__ : List[str] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self :Union[str, Any] , image_size :Optional[int]=224 , patch_size :List[str]=4 , num_channels :Tuple=3 , embed_dim :List[Any]=96 , depths :Any=[2, 2, 6, 2] , num_heads :int=[3, 6, 12, 24] , window_size :List[Any]=7 , mlp_ratio :Dict=4.0 , qkv_bias :Any=True , hidden_dropout_prob :int=0.0 , attention_probs_dropout_prob :List[Any]=0.0 , drop_path_rate :Tuple=0.1 , hidden_act :str="gelu" , use_absolute_embeddings :Union[str, Any]=False , initializer_range :Tuple=0.02 , layer_norm_eps :List[str]=1E-5 , out_features :List[str]=None , out_indices :Any=None , **kwargs :Any , ) -> Union[str, Any]:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
| 344 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class a_ ( PretrainedConfig ):
UpperCamelCase__ : Union[str, Any] ="unispeech"
def __init__( self :Dict , _lowercase :Optional[int]=32 , _lowercase :List[Any]=768 , _lowercase :List[str]=12 , _lowercase :Tuple=12 , _lowercase :List[str]=3072 , _lowercase :Dict="gelu" , _lowercase :Any=0.1 , _lowercase :Optional[int]=0.1 , _lowercase :Dict=0.1 , _lowercase :List[str]=0.0 , _lowercase :Dict=0.0 , _lowercase :List[Any]=0.1 , _lowercase :str=0.1 , _lowercase :Optional[Any]=0.02 , _lowercase :List[str]=1E-5 , _lowercase :List[Any]="group" , _lowercase :str="gelu" , _lowercase :Any=(512, 512, 512, 512, 512, 512, 512) , _lowercase :Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , _lowercase :Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , _lowercase :Dict=False , _lowercase :str=128 , _lowercase :List[str]=16 , _lowercase :Union[str, Any]=False , _lowercase :Union[str, Any]=True , _lowercase :Any=0.05 , _lowercase :List[Any]=10 , _lowercase :Optional[Any]=2 , _lowercase :Tuple=0.0 , _lowercase :Union[str, Any]=10 , _lowercase :Union[str, Any]=0 , _lowercase :List[Any]=320 , _lowercase :Union[str, Any]=2 , _lowercase :int=0.1 , _lowercase :int=100 , _lowercase :List[str]=256 , _lowercase :Tuple=256 , _lowercase :List[str]=0.1 , _lowercase :List[Any]="mean" , _lowercase :Any=False , _lowercase :Union[str, Any]=False , _lowercase :str=256 , _lowercase :int=80 , _lowercase :List[str]=0 , _lowercase :Union[str, Any]=1 , _lowercase :str=2 , _lowercase :Dict=0.5 , **_lowercase :List[Any] , ) -> Dict:
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase)
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = feat_extract_norm
UpperCAmelCase_ = feat_extract_activation
UpperCAmelCase_ = list(_lowercase)
UpperCAmelCase_ = list(_lowercase)
UpperCAmelCase_ = list(_lowercase)
UpperCAmelCase_ = conv_bias
UpperCAmelCase_ = num_conv_pos_embeddings
UpperCAmelCase_ = num_conv_pos_embedding_groups
UpperCAmelCase_ = len(self.conv_dim)
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = feat_proj_dropout
UpperCAmelCase_ = final_dropout
UpperCAmelCase_ = layerdrop
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_ctc_classes
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = do_stable_layer_norm
UpperCAmelCase_ = use_weighted_layer_sum
UpperCAmelCase_ = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ = apply_spec_augment
UpperCAmelCase_ = mask_time_prob
UpperCAmelCase_ = mask_time_length
UpperCAmelCase_ = mask_time_min_masks
UpperCAmelCase_ = mask_feature_prob
UpperCAmelCase_ = mask_feature_length
UpperCAmelCase_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ = num_codevectors_per_group
UpperCAmelCase_ = num_codevector_groups
UpperCAmelCase_ = contrastive_logits_temperature
UpperCAmelCase_ = feat_quantizer_dropout
UpperCAmelCase_ = num_negatives
UpperCAmelCase_ = codevector_dim
UpperCAmelCase_ = proj_codevector_dim
UpperCAmelCase_ = diversity_loss_weight
# ctc loss
UpperCAmelCase_ = ctc_loss_reduction
UpperCAmelCase_ = ctc_zero_infinity
# pretraining loss
UpperCAmelCase_ = replace_prob
@property
def __a ( self :Dict) -> Dict:
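        # e.g. with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this product reduces to
        # 5 * 2**6 = 320, i.e. one feature-extractor frame per 320 raw audio samples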
return functools.reduce(operator.mul , self.conv_stride , 1)
| 370 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase_ = False
UpperCamelCase_ = False
def train_command_factory( args :Namespace ) -> Any:
    '''simple docstring'''
    return TrainCommand(args )
class a_ ( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser :ArgumentParser) -> List[Any]:
        train_parser = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
        train_parser.add_argument(
            '''--train_data''' , type=str , required=True , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
        train_parser.add_argument(
            '''--column_label''' , type=int , default=0 , help='''Column of the dataset csv file with example labels.''')
        train_parser.add_argument(
            '''--column_text''' , type=int , default=1 , help='''Column of the dataset csv file with example texts.''')
        train_parser.add_argument(
            '''--column_id''' , type=int , default=2 , help='''Column of the dataset csv file with example ids.''')
        train_parser.add_argument(
            '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
        train_parser.add_argument('''--validation_data''' , type=str , default='''''' , help='''path to validation dataset.''')
        train_parser.add_argument(
            '''--validation_split''' , type=float , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
        train_parser.add_argument('''--output''' , type=str , default='''./''' , help='''path to saved the trained model.''')
        train_parser.add_argument(
            '''--task''' , type=str , default='''text_classification''' , help='''Task to train the model on.''')
        train_parser.add_argument(
            '''--model''' , type=str , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
        train_parser.add_argument('''--train_batch_size''' , type=int , default=32 , help='''Batch size for training.''')
        train_parser.add_argument('''--valid_batch_size''' , type=int , default=64 , help='''Batch size for validation.''')
        train_parser.add_argument('''--learning_rate''' , type=float , default=3E-5 , help='''Learning rate.''')
        train_parser.add_argument('''--adam_epsilon''' , type=float , default=1E-08 , help='''Epsilon for Adam optimizer.''')
        train_parser.set_defaults(func=train_command_factory)
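    # Invocation sketch (hypothetical paths): once registered, the subcommand is driven as
    #
    #   transformers-cli train --train_data ./train.csv --task text_classification --output ./model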
    def __init__( self :Union[str, Any] , args :Namespace) -> Union[str, Any]:
        self.logger = logging.get_logger('''transformers-cli/training''')
        self.framework = '''tf''' if is_tf_available() else '''torch'''
        os.makedirs(args.output , exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
def __a ( self :int) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :Optional[Any]) -> Any:
raise NotImplementedError
def __a ( self :int) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
| 344 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCamelCase_ = sys.version_info >= (3, 10)
def list_field( default=None , metadata=None ) -> Tuple:
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
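# Usage sketch: list_field routes a mutable default through default_factory, sidestepping
# the "mutable default ... is not allowed" dataclass error:
#
#   @dataclass
#   class WithList:
#       xs: List[int] = list_field(default=[1, 2])
#
#   assert WithList().xs == [1, 2]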
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : int
UpperCamelCase__ : float
UpperCamelCase__ : str
UpperCamelCase__ : bool
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : int =42
UpperCamelCase__ : str =field(default="toto" , metadata={"help": "help message"} )
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[bool] =None
class a_ ( Enum ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] ="titi"
UpperCamelCase__ : int ="toto"
class a_ ( Enum ):
"""simple docstring"""
UpperCamelCase__ : Dict ="titi"
UpperCamelCase__ : Tuple ="toto"
UpperCamelCase__ : Dict =42
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : BasicEnum ="toto"
def __a ( self :List[Any]) -> int:
UpperCAmelCase_ = BasicEnum(self.foo)
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : MixedTypeEnum ="toto"
def __a ( self :Any) -> Optional[int]:
UpperCAmelCase_ = MixedTypeEnum(self.foo)
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : Optional[float] =field(default=_snake_case , metadata={"help": "help message"} )
UpperCamelCase__ : Optional[str] =None
UpperCamelCase__ : Optional[List[str]] =list_field(default=[] )
UpperCamelCase__ : Optional[List[int]] =list_field(default=[] )
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : List[int] =list_field(default=[] )
UpperCamelCase__ : List[int] =list_field(default=[1, 2, 3] )
UpperCamelCase__ : List[str] =list_field(default=["Hallo", "Bonjour", "Hello"] )
UpperCamelCase__ : List[float] =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : List[int] =field()
UpperCamelCase__ : str =field()
UpperCamelCase__ : BasicEnum =field()
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = BasicEnum(self.required_enum)
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : int
UpperCamelCase__ : "BasicEnum" =field()
UpperCamelCase__ : "Optional[bool]" =None
UpperCamelCase__ : "str" =field(default="toto" , metadata={"help": "help message"} )
UpperCamelCase__ : "List[str]" =list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool | None =None
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : int | None =None
UpperCamelCase__ : float | None =field(default=_snake_case , metadata={"help": "help message"} )
UpperCamelCase__ : str | None =None
UpperCamelCase__ : list[str] | None =list_field(default=[] )
UpperCamelCase__ : list[int] | None =list_field(default=[] )
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def argparsersEqual( self :List[str] , a :argparse.ArgumentParser , b :argparse.ArgumentParser) -> Optional[int]:
        self.assertEqual(len(a._actions) , len(b._actions))
        for x, y in zip(a._actions , b._actions):
            xx = {k: v for k, v in vars(x).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , None) and yy.get('''choices''' , None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](expected_choice) , yy['''type'''](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy)
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowercase , required=_lowercase)
expected.add_argument('''--bar''' , type=_lowercase , required=_lowercase)
expected.add_argument('''--baz''' , type=_lowercase , required=_lowercase)
expected.add_argument('''--flag''' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='''?''')
self.argparsersEqual(_lowercase , _lowercase)
UpperCAmelCase_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((UpperCAmelCase_ ) , ) = parser.parse_args_into_dataclasses(_lowercase , look_for_args_file=_lowercase)
self.assertFalse(example.flag)
def __a ( self :int) -> int:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_lowercase)
expected.add_argument('''--baz''' , default='''toto''' , type=_lowercase , help='''help message''')
self.argparsersEqual(_lowercase , _lowercase)
def __a ( self :List[Any]) -> Union[str, Any]:
UpperCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='''?''')
expected.add_argument('''--baz''' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='''?''')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowercase , dest='''baz''')
expected.add_argument('''--opt''' , type=_lowercase , default=_lowercase)
UpperCAmelCase_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowercase)
for dataclass_type in dataclass_types:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
self.argparsersEqual(_lowercase , _lowercase)
UpperCAmelCase_ = parser.parse_args([])
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase))
UpperCAmelCase_ = parser.parse_args(['''--foo''', '''--no_baz'''])
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase))
UpperCAmelCase_ = parser.parse_args(['''--foo''', '''--baz'''])
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase))
UpperCAmelCase_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''])
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase))
UpperCAmelCase_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''])
self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase))
def __a ( self :List[Any]) -> Union[str, Any]:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42]) , )
self.argparsersEqual(_lowercase , _lowercase)
UpperCAmelCase_ = parser.parse_args([])
self.assertEqual(args.foo , '''toto''')
UpperCAmelCase_ = parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
UpperCAmelCase_ = parser.parse_args(['''--foo''', '''titi'''])
self.assertEqual(args.foo , '''titi''')
UpperCAmelCase_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
UpperCAmelCase_ = parser.parse_args(['''--foo''', '''42'''])
self.assertEqual(args.foo , 42)
UpperCAmelCase_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __a ( self :Optional[int]) -> Tuple:
@dataclass
class a_ :
"""simple docstring"""
UpperCamelCase__ : Literal["titi", "toto", 42] ="toto"
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42]) , )
self.argparsersEqual(_lowercase , _lowercase)
UpperCAmelCase_ = parser.parse_args([])
self.assertEqual(args.foo , '''toto''')
UpperCAmelCase_ = parser.parse_args(['''--foo''', '''titi'''])
self.assertEqual(args.foo , '''titi''')
UpperCAmelCase_ = parser.parse_args(['''--foo''', '''42'''])
self.assertEqual(args.foo , 42)
def __a ( self :Union[str, Any]) -> str:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowercase)
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowercase)
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowercase)
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowercase)
self.argparsersEqual(_lowercase , _lowercase)
UpperCAmelCase_ = parser.parse_args([])
self.assertEqual(
_lowercase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3]) , )
UpperCAmelCase_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split())
self.assertEqual(_lowercase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7]))
def __a ( self :Optional[Any]) -> List[Any]:
UpperCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_lowercase , type=_lowercase)
expected.add_argument('''--bar''' , default=_lowercase , type=_lowercase , help='''help message''')
expected.add_argument('''--baz''' , default=_lowercase , type=_lowercase)
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowercase)
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowercase)
UpperCAmelCase_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowercase)
for dataclass_type in dataclass_types:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
self.argparsersEqual(_lowercase , _lowercase)
UpperCAmelCase_ = parser.parse_args([])
self.assertEqual(_lowercase , Namespace(foo=_lowercase , bar=_lowercase , baz=_lowercase , ces=[] , des=[]))
UpperCAmelCase_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split())
self.assertEqual(_lowercase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3]))
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowercase , required=_lowercase)
expected.add_argument('''--required_str''' , type=_lowercase , required=_lowercase)
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto''']) , choices=['''titi''', '''toto'''] , required=_lowercase , )
self.argparsersEqual(_lowercase , _lowercase)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_lowercase , required=_lowercase)
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto''']) , choices=['''titi''', '''toto'''] , required=_lowercase , )
expected.add_argument('''--opt''' , type=_lowercase , default=_lowercase)
expected.add_argument('''--baz''' , default='''toto''' , type=_lowercase , help='''help message''')
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowercase)
self.argparsersEqual(_lowercase , _lowercase)
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
UpperCAmelCase_ = parser.parse_dict(_lowercase)[0]
UpperCAmelCase_ = BasicExample(**_lowercase)
self.assertEqual(_lowercase , _lowercase)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_lowercase , parser.parse_dict , _lowercase , allow_extra_keys=_lowercase)
def __a ( self :Optional[Any]) -> Optional[int]:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = os.path.join(_lowercase , '''temp_json''')
os.mkdir(_lowercase)
with open(temp_local_path + '''.json''' , '''w+''') as f:
json.dump(_lowercase , _lowercase)
UpperCAmelCase_ = parser.parse_yaml_file(Path(temp_local_path + '''.json'''))[0]
UpperCAmelCase_ = BasicExample(**_lowercase)
self.assertEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
UpperCAmelCase_ = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = os.path.join(_lowercase , '''temp_yaml''')
os.mkdir(_lowercase)
with open(temp_local_path + '''.yaml''' , '''w+''') as f:
yaml.dump(_lowercase , _lowercase)
UpperCAmelCase_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml'''))[0]
UpperCAmelCase_ = BasicExample(**_lowercase)
self.assertEqual(_lowercase , _lowercase)
def __a ( self :Optional[int]) -> List[str]:
UpperCAmelCase_ = HfArgumentParser(_lowercase)
self.assertIsNotNone(_lowercase)
| 371 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester( unittest.TestCase ):
    def __init__( self :Tuple , parent :List[Any] , do_resize :bool = True , size :Dict[str, int] = None , size_divisor :int = 32 , do_rescale :bool = True , rescale_factor :Union[int, float] = 1 / 255 , do_normalize :bool = True , do_center_crop :bool = True , image_mean :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , do_pad :bool = True , batch_size :List[Any]=7 , min_resolution :Dict=30 , max_resolution :Optional[int]=400 , num_channels :Any=3 , ) -> Any:
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {'''shortest_edge''': 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values( self :List[Any] , image_inputs :Tuple , batched :List[str]=False) -> int:
        if not batched:
            size = self.size['''shortest_edge''']
            image = image_inputs[0]
            if isinstance(image , Image.Image):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            scale = size / min(w , h)
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh , neww) > max_size:
                scale = max_size / max(newh , neww)
                newh = newh * scale
                neww = neww * scale
            newh , neww = int(newh + 0.5), int(neww + 0.5)
            expected_height , expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values , key=lambda item: item[0])[0]
            expected_width = max(expected_values , key=lambda item: item[1])[1]
        return expected_height, expected_width
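    # Worked example (a sketch): with shortest_edge=288 and size_divisor=32, a 400x600 (HxW)
    # image scales by 288/400 to (288, 432); flooring both sides to multiples of 32 then
    # yields an expected (height, width) of (288, 416).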
@require_torch
@require_vision
class a_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class =BridgeTowerImageProcessor if is_vision_available() else None
    def setUp( self :int) -> Dict:
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)
@property
    def image_processor_dict( self :Dict) -> Any:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self :Dict) -> Tuple:
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , '''image_mean'''))
        self.assertTrue(hasattr(image_processing , '''image_std'''))
        self.assertTrue(hasattr(image_processing , '''do_normalize'''))
        self.assertTrue(hasattr(image_processing , '''do_resize'''))
        self.assertTrue(hasattr(image_processing , '''size'''))
        self.assertTrue(hasattr(image_processing , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
    def test_call_pil( self :List[str]) -> Tuple:
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True)
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self :Union[str, Any]) -> Optional[int]:
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True)
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self :str) -> int:
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True)
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 344 | 0 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =GPTSwaTokenizer
UpperCamelCase__ : Optional[int] =False
UpperCamelCase__ : Optional[int] =True
UpperCamelCase__ : Tuple =False
def __a ( self :str) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = GPTSwaTokenizer(_lowercase , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , _lowercase :int) -> str:
UpperCAmelCase_ = '''This is a test'''
UpperCAmelCase_ = '''This is a test'''
return input_text, output_text
def __a ( self :Optional[Any]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :int) -> int:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(_lowercase) , 2000)
def __a ( self :Union[str, Any]) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 2000)
def __a ( self :Dict) -> str:
UpperCAmelCase_ = GPTSwaTokenizer(_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase) , [465, 287, 265, 631, 842])
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
# fmt: off
self.assertListEqual(
_lowercase , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
# fmt: off
self.assertListEqual(
_lowercase , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
# fmt: on
def __a ( self :str) -> Any:
UpperCAmelCase_ = GPTSwaTokenizer(_lowercase)
UpperCAmelCase_ = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
UpperCAmelCase_ = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_lowercase , _lowercase):
self.assertListEqual(tokenizer.encode_fast(_lowercase) , _lowercase)
# Test that decode_fast returns the input text
for text, token_ids in zip(_lowercase , _lowercase):
self.assertEqual(tokenizer.decode_fast(_lowercase) , _lowercase)
@slow
def __a ( self :Optional[Any]) -> Optional[int]:
UpperCAmelCase_ = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=_lowercase , )
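# A hedged sketch of the byte-fallback behaviour asserted above: characters
# absent from the SentencePiece vocab (e.g. the "é" in "falsé") come out as
# raw byte tokens such as "<0xC3>", "<0xA9>" instead of a single <unk>.
def _byte_fallback_sketch(tokenizer):
    return tokenizer.tokenize("falsé")  # [..., "<0xC3>", "<0xA9>", ...]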
| 350 |
def solution( limit = 100_0000 ) -> int:
    '''simple docstring'''
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
    print(solution())
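# A quick sanity check for the sieve above (not part of the original solution):
# summing phi(d) for d in 2..8 counts the 21 reduced proper fractions with
# denominator <= 8, i.e. 1 + 2 + 2 + 4 + 2 + 6 + 4.
assert solution(8) == 21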
| 344 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results( result , args ):
    '''simple docstring'''
    log_outputs = args.log_outputs
    dataset_id = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('''wer''' )
    cer = load_metric('''cer''' )
    # compute metrics
    wer_result = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    cer_result = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str )
    with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file , '''w''' ) as p, open(target_file , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f"{i}" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"{i}" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(write_to_file , with_indices=True )
def normalize_text( text ) -> str:
    '''simple docstring'''
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''  ''', ''' ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
def main( args ):
    '''simple docstring'''
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('''audio''' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['''prediction'''] = prediction['''text''']
        batch['''target'''] = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()
    main(args)
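# Example invocation (a sketch; the model and dataset ids below are
# placeholders, not values taken from this script):
#
#   python eval.py \
#       --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config sv-SE \
#       --split test --log_outputs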
| 351 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =(PNDMScheduler,)
UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),)
def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :int , **_lowercase :str) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
# an earlier version of set_timesteps() caused an error when indexing alphas with an inference-step count that is a power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
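# A hedged usage sketch (not part of the test suite): outside the tests above,
# PNDMScheduler.step dispatches to step_prk / step_plms internally, so a plain
# loop over scheduler.timesteps is enough. The tensor sizes are illustrative.
def _pndm_denoising_sketch():
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn(1, 3, 8, 8)  # stand-in for a UNet forward pass
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample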
| 344 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def __a ( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self :List[Any] , _lowercase :Tuple , _lowercase :List[str]=False) -> int:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(_lowercase , _lowercase)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(_lowercase , _lowercase) > max_size:
UpperCAmelCase_ = max_size / max(_lowercase , _lowercase)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[0])[0]
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
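# A hedged usage sketch outside the test harness: the processor resizes the
# short edge to size["shortest_edge"] and pads so both spatial dims are
# divisible by size_divisor. "BridgeTower/bridgetower-base" is an assumed Hub
# checkpoint id.
def _bridgetower_preprocess_sketch():
    processor = BridgeTowerImageProcessor.from_pretrained("BridgeTower/bridgetower-base")
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    return pixel_values.shape  # (1, 3, H, W), with H and W multiples of size_divisor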
| 352 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class a_ :
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="dict"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Audio" , init=_snake_case , repr=_snake_case )
def __call__( self :List[Any]) -> List[Any]:
return self.pa_type
def __a ( self :Any , _lowercase :Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(_lowercase , _lowercase):
return {"bytes": None, "path": value}
elif isinstance(_lowercase , _lowercase):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(_lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
# to convert raw PCM bytes to WAV bytes, the sampling rate must be known
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
# if we already have the PCM bytes, we don't need to read the file again (just use them!)
UpperCAmelCase_ = np.frombuffer(value['''bytes'''] , dtype=np.int16).astype(np.float32) / 32767
else:
UpperCAmelCase_ = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.float32) / 32767
UpperCAmelCase_ = BytesIO(bytes())
sf.write(_lowercase , _lowercase , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self :Dict , _lowercase :dict , _lowercase :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
UpperCAmelCase_ , UpperCAmelCase_ = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
UpperCAmelCase_ = xsplitext(_lowercase)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split('''::''')[-1]
try:
UpperCAmelCase_ = string_to_dict(_lowercase , config.HUB_DATASETS_URL)['''repo_id''']
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(_lowercase , '''rb''' , use_auth_token=_lowercase) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(_lowercase)
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(_lowercase , orig_sr=_lowercase , target_sr=self.sampling_rate)
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def __a ( self :int , _lowercase :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
UpperCAmelCase_ = pa.array([Audio().encode_example(_lowercase) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(_lowercase , self.pa_type)
def __a ( self :Any , _lowercase :pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(_lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(_lowercase , self.pa_type)
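# A hedged usage sketch of this feature from the datasets API: casting a
# column to Audio makes decoding lazy and resamples on access.
# "some_file.wav" is a placeholder path.
def _audio_feature_sketch():
    from datasets import Audio, Dataset
    ds = Dataset.from_dict({"audio": ["some_file.wav"]})
    ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
    return ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}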
| 344 | 0 |
def solution( n = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
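# Quick check of the closed forms above (not in the original solution):
# for n = 10 the square of the sum is 55**2 = 3025 and the sum of squares
# is 385, so the difference is 2640.
assert solution(10) == 2640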
if __name__ == "__main__":
print(f"{solution() = }") | 353 |
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=_snake_case ):
UpperCamelCase__ : Any =["torch", "scipy"]
def __init__( self :List[str] , *_lowercase :List[str] , **_lowercase :Union[str, Any]) -> List[Any]:
requires_backends(self , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Dict , *_lowercase :Any , **_lowercase :Dict) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Optional[Any] , *_lowercase :str , **_lowercase :Optional[Any]) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
| 344 | 0 |
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_58_18,
}
def energy_conversion( from_type , to_type , value ) -> float:
    '''simple docstring'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION )}"
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
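# A short worked example of the conversion above: one kilowatt-hour is
# 3_600_000 J, so converting it to joules multiplies by exactly that factor.
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000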
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 354 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def A ( __UpperCAmelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for rt in rc.restypes:
UpperCAmelCase_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCAmelCase_ = {name: i for i, name in enumerate(__UpperCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.floataa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCAmelCase_ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCAmelCase_ = rc.restype_atoa[restype_letter]
UpperCAmelCase_ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCAmelCase_ = rc.atom_order[atom_name]
UpperCAmelCase_ = 1
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
return protein
def A ( __UpperCAmelCase ) -> Dict[str, np.ndarray]:
'''simple docstring'''
UpperCAmelCase_ = tree_map(lambda __UpperCAmelCase : torch.tensor(__UpperCAmelCase , device=batch['''aatype'''].device ) , __UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = tensor_tree_map(lambda __UpperCAmelCase : np.array(__UpperCAmelCase ) , make_atomaa_masks(__UpperCAmelCase ) )
return out
| 344 | 0 |
import warnings
from functools import wraps
from typing import Callable
def experimental( fn ) -> Callable:
    '''simple docstring'''
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") , UserWarning , )
        return fn(*args , **kwargs )
    return _inner_fn
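# A hedged usage sketch: decorating a function routes every call through
# _inner_fn, which emits a UserWarning before delegating to the wrapped
# function. `_answer` is an illustrative name, not part of this module.
@experimental
def _answer():
    return 42  # calling _answer() warns once per call and still returns 42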
| 355 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="openai/whisper-base"
UpperCamelCase__ : int =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
UpperCamelCase__ : Any ="transcriber"
UpperCamelCase__ : Optional[int] =WhisperProcessor
UpperCamelCase__ : List[str] =WhisperForConditionalGeneration
UpperCamelCase__ : List[Any] =["audio"]
UpperCamelCase__ : Union[str, Any] =["text"]
def __a ( self :int , _lowercase :Any) -> Tuple:
return self.pre_processor(_lowercase , return_tensors='''pt''').input_features
def __a ( self :Dict , _lowercase :Tuple) -> Any:
return self.model.generate(inputs=_lowercase)
def __a ( self :int , _lowercase :Union[str, Any]) -> Optional[Any]:
return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=_lowercase)[0]
| 344 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class a_ ( unittest.TestCase ):
@property
def __a ( self :Optional[int]) -> Optional[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __a ( self :Union[str, Any]) -> str:
UpperCAmelCase_ = ort.SessionOptions()
UpperCAmelCase_ = False
return options
def __a ( self :Tuple) -> Union[str, Any]:
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''')
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''')
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''')
# using the PNDM scheduler by default
UpperCAmelCase_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = '''A red cat sitting on a park bench'''
UpperCAmelCase_ = np.random.RandomState(0)
UpperCAmelCase_ = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_lowercase , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 356 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def __a ( self :int) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :Optional[int]) -> Union[str, Any]:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''')
UpperCAmelCase_ = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
sd_pipe.set_scheduler('''sample_euler''')
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = sd_pipe([prompt] , generator=_lowercase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''')
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def __a ( self :Union[str, Any]) -> Union[str, Any]:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
UpperCAmelCase_ = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
sd_pipe.set_scheduler('''sample_euler''')
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = sd_pipe([prompt] , generator=_lowercase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''')
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-1
def __a ( self :List[str]) -> str:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
UpperCAmelCase_ = sd_pipe.to(_lowercase)
sd_pipe.set_progress_bar_config(disable=_lowercase)
sd_pipe.set_scheduler('''sample_dpmpp_2m''')
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = sd_pipe(
[prompt] , generator=_lowercase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=_lowercase , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 357 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : str =BigBirdTokenizer
UpperCamelCase__ : Tuple =BigBirdTokenizerFast
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : List[str] =True
def __a ( self :Any) -> List[str]:
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_lowercase , keep_accents=_lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __a ( self :Any) -> List[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 344 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
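# A minimal sketch of the lazy-import idea behind `_LazyModule` (a simplified
# stand-in, not the transformers implementation): exported names are resolved and
# cached only on first attribute access, so importing the package stays cheap.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the module that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._name_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value


_demo = LazyModuleSketch("demo", {"math": ["sqrt"], "json": ["dumps"]})
assert _demo.sqrt(9.0) == 3.0  # `math` is only imported at this point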
| 358 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """Decrypt with a repeating XOR key, or return None if any character is invalid."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-lowercase-letter key and keep decryptions with only valid chars."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        decoded = try_key(ciphertext, key)
        if decoded is not None:
            possibles.append(decoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only candidate plaintexts containing the given common English word."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: return the sum of ASCII values of the decrypted text."""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
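# A quick round-trip check of the XOR property the solver relies on (made-up key
# and plaintext, not the Project Euler input): decrypting with the encrypting key
# must recover the original text.
_demo_key = (ord("g"), ord("o"), ord("d"))
_demo_cipher = [ord(ch) ^ k for ch, k in zip("the cat sat", cycle(_demo_key))]
assert try_key(_demo_cipher, _demo_key) == "the cat sat"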
| 344 | 0 |
from math import factorial
def solution(n: int = 100) -> int:
    """Return the sum of the digits of n! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(n)))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
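# Worked check (not part of the original solution): 10! == 3628800, and its digits
# sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
assert solution(10) == 27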
| 359 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
DATASET_LOADING_SCRIPT_CODE = UpperCamelCase_  # the script source defined above


@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
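# A hypothetical consumer of the fixtures above (a sketch, not part of the original
# conftest): pytest injects the generated script directory, and `datasets` can build
# the dummy dataset straight from it.
def test_dummy_dataset_builder(dataset_loading_script_dir):
    import datasets

    builder = datasets.load_dataset_builder(dataset_loading_script_dir)
    assert "tokens" in builder.info.features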
| 344 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size=16):
    """Build tiny MRPC train/eval dataloaders for tests."""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
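# A hypothetical usage sketch (assumes `accelerate` is installed and the repo's
# tests/test_samples/MRPC files are present): the dataloaders are built around an
# Accelerator so collate_fn can pick a padding strategy per distributed backend.
if __name__ == "__main__":
    from accelerate import Accelerator

    accelerator = Accelerator()
    train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)
    train_dl, eval_dl = accelerator.prepare(train_dl, eval_dl)
    print(f"train batches: {len(train_dl)}, eval batches: {len(eval_dl)}")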
| 360 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="open-llama"
def __init__( self :Union[str, Any] , _lowercase :List[Any]=100000 , _lowercase :Dict=4096 , _lowercase :List[Any]=11008 , _lowercase :Optional[int]=32 , _lowercase :Union[str, Any]=32 , _lowercase :List[str]="silu" , _lowercase :Union[str, Any]=2048 , _lowercase :Any=0.02 , _lowercase :Optional[Any]=1E-6 , _lowercase :str=True , _lowercase :str=0 , _lowercase :Any=1 , _lowercase :Optional[Any]=2 , _lowercase :str=False , _lowercase :Dict=True , _lowercase :Optional[Any]=0.1 , _lowercase :Tuple=0.1 , _lowercase :Dict=True , _lowercase :List[Any]=True , _lowercase :Dict=None , **_lowercase :Optional[int] , ) -> List[Any]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = kwargs.pop(
'''use_memorry_efficient_attention''' , _lowercase)
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_dropout_prob
UpperCAmelCase_ = use_stable_embedding
UpperCAmelCase_ = shared_input_output_embedding
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase , )
def __a ( self :int) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowercase) or len(self.rope_scaling) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}")
UpperCAmelCase_ = self.rope_scaling.get('''type''' , _lowercase)
UpperCAmelCase_ = self.rope_scaling.get('''factor''' , _lowercase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(_lowercase , _lowercase) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] =(DPMSolverSinglestepScheduler,)
UpperCamelCase__ : Tuple =(("num_inference_steps", 25),)
def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int:
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_lowercase)
return config
def __a ( self :Union[str, Any] , _lowercase :List[Any]=0 , **_lowercase :Optional[int]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
def __a ( self :Optional[Any] , _lowercase :str=0 , **_lowercase :Union[str, Any]) -> Dict:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Dict , _lowercase :Union[str, Any]=None , **_lowercase :List[Any]) -> int:
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
assert sample.dtype == torch.floataa
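# A minimal denoising-loop sketch mirroring `full_loop` above (random tensors stand
# in for a real UNet and latents; assumes `diffusers` and `torch` are installed).
if __name__ == "__main__":
    import torch
    from diffusers import DPMSolverSinglestepScheduler

    sched = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    sched.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in sched.timesteps:
        noise_pred = torch.randn_like(sample)  # a real pipeline would call the UNet here
        sample = sched.step(noise_pred, t, sample).prev_sample
    print(sample.shape)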
| 344 | 0 |
| 362 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a_ ( nn.Module ):
def __init__( self :Optional[Any]) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
        UpperCAmelCase_ = nn.BatchNorm1d(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :Dict , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( _snake_case ):
def __a ( self :Tuple , _lowercase :Optional[int] , *_lowercase :Union[str, Any] , **_lowercase :Any) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class a_ ( _snake_case ):
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :Tuple) -> int:
return output + 1
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
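# A standalone sketch of the pre-forward hook mechanism exercised above (assumes
# `accelerate` is installed; the layer and tensor are made up).
if __name__ == "__main__":
    import torch
    from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

    class AddOneHook(ModelHook):
        def pre_forward(self, module, *args, **kwargs):
            # shift the first positional input by one before the real forward runs
            return (args[0] + 1,) + args[1:], kwargs

    layer = torch.nn.Linear(3, 3)
    x = torch.randn(2, 3)
    add_hook_to_module(layer, AddOneHook())
    hooked_out = layer(x)  # computed on x + 1 because of the hook
    remove_hook_from_module(layer)
    assert torch.allclose(hooked_out, layer(x + 1), atol=1e-6)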
| 344 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a_ ( _snake_case ):
UpperCamelCase__ : jnp.ndarray
@flax_register_to_config
class a_ ( nn.Module , _snake_case , _snake_case ):
UpperCamelCase__ : int =32
UpperCamelCase__ : int =4
UpperCamelCase__ : int =4
UpperCamelCase__ : Tuple[str] =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCamelCase__ : Tuple[str] =("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
UpperCamelCase__ : Union[bool, Tuple[bool]] =False
UpperCamelCase__ : Tuple[int] =(3_20, 6_40, 12_80, 12_80)
UpperCamelCase__ : int =2
UpperCamelCase__ : Union[int, Tuple[int]] =8
UpperCamelCase__ : Optional[Union[int, Tuple[int]]] =None
UpperCamelCase__ : int =12_80
UpperCamelCase__ : float =0.0
UpperCamelCase__ : bool =False
UpperCamelCase__ : jnp.dtype =jnp.floataa
UpperCamelCase__ : bool =True
UpperCamelCase__ : int =0
UpperCamelCase__ : bool =False
def __a ( self :int , _lowercase :jax.random.KeyArray) -> FrozenDict:
# init input tensors
UpperCAmelCase_ = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCAmelCase_ = jnp.zeros(_lowercase , dtype=jnp.floataa)
UpperCAmelCase_ = jnp.ones((1,) , dtype=jnp.intaa)
UpperCAmelCase_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa)
UpperCAmelCase_ , UpperCAmelCase_ = jax.random.split(_lowercase)
UpperCAmelCase_ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowercase , _lowercase , _lowercase , _lowercase)["params"]
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = self.block_out_channels
UpperCAmelCase_ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''')
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCAmelCase_ = self.num_attention_heads or self.attention_head_dim
# input
UpperCAmelCase_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
UpperCAmelCase_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
UpperCAmelCase_ = FlaxTimestepEmbedding(_lowercase , dtype=self.dtype)
UpperCAmelCase_ = self.only_cross_attention
if isinstance(_lowercase , _lowercase):
UpperCAmelCase_ = (only_cross_attention,) * len(self.down_block_types)
if isinstance(_lowercase , _lowercase):
UpperCAmelCase_ = (num_attention_heads,) * len(self.down_block_types)
# down
UpperCAmelCase_ = []
UpperCAmelCase_ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types):
UpperCAmelCase_ = output_channel
UpperCAmelCase_ = block_out_channels[i]
UpperCAmelCase_ = i == len(_lowercase) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCAmelCase_ = FlaxCrossAttnDownBlockaD(
in_channels=_lowercase , out_channels=_lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
UpperCAmelCase_ = FlaxDownBlockaD(
in_channels=_lowercase , out_channels=_lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowercase)
UpperCAmelCase_ = down_blocks
# mid
UpperCAmelCase_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
UpperCAmelCase_ = []
UpperCAmelCase_ = list(reversed(_lowercase))
UpperCAmelCase_ = list(reversed(_lowercase))
UpperCAmelCase_ = list(reversed(_lowercase))
UpperCAmelCase_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types):
UpperCAmelCase_ = output_channel
UpperCAmelCase_ = reversed_block_out_channels[i]
UpperCAmelCase_ = reversed_block_out_channels[min(i + 1 , len(_lowercase) - 1)]
UpperCAmelCase_ = i == len(_lowercase) - 1
if up_block_type == "CrossAttnUpBlock2D":
UpperCAmelCase_ = FlaxCrossAttnUpBlockaD(
in_channels=_lowercase , out_channels=_lowercase , prev_output_channel=_lowercase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
UpperCAmelCase_ = FlaxUpBlockaD(
in_channels=_lowercase , out_channels=_lowercase , prev_output_channel=_lowercase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowercase)
UpperCAmelCase_ = output_channel
UpperCAmelCase_ = up_blocks
# out
UpperCAmelCase_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
UpperCAmelCase_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self :Any , _lowercase :Optional[int] , _lowercase :Optional[int] , _lowercase :Tuple , _lowercase :List[str]=None , _lowercase :Union[str, Any]=None , _lowercase :bool = True , _lowercase :bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_lowercase , jnp.ndarray):
UpperCAmelCase_ = jnp.array([timesteps] , dtype=jnp.intaa)
elif isinstance(_lowercase , jnp.ndarray) and len(timesteps.shape) == 0:
UpperCAmelCase_ = timesteps.astype(dtype=jnp.floataa)
UpperCAmelCase_ = jnp.expand_dims(_lowercase , 0)
UpperCAmelCase_ = self.time_proj(_lowercase)
UpperCAmelCase_ = self.time_embedding(_lowercase)
# 2. pre-process
UpperCAmelCase_ = jnp.transpose(_lowercase , (0, 2, 3, 1))
UpperCAmelCase_ = self.conv_in(_lowercase)
# 3. down
UpperCAmelCase_ = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowercase , _lowercase):
UpperCAmelCase_ , UpperCAmelCase_ = down_block(_lowercase , _lowercase , _lowercase , deterministic=not train)
else:
UpperCAmelCase_ , UpperCAmelCase_ = down_block(_lowercase , _lowercase , deterministic=not train)
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
UpperCAmelCase_ = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowercase , _lowercase):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
UpperCAmelCase_ = new_down_block_res_samples
# 4. mid
UpperCAmelCase_ = self.mid_block(_lowercase , _lowercase , _lowercase , deterministic=not train)
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
UpperCAmelCase_ = down_block_res_samples[-(self.layers_per_block + 1) :]
UpperCAmelCase_ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowercase , _lowercase):
UpperCAmelCase_ = up_block(
_lowercase , temb=_lowercase , encoder_hidden_states=_lowercase , res_hidden_states_tuple=_lowercase , deterministic=not train , )
else:
UpperCAmelCase_ = up_block(_lowercase , temb=_lowercase , res_hidden_states_tuple=_lowercase , deterministic=not train)
# 6. post-process
UpperCAmelCase_ = self.conv_norm_out(_lowercase)
UpperCAmelCase_ = nn.silu(_lowercase)
UpperCAmelCase_ = self.conv_out(_lowercase)
UpperCAmelCase_ = jnp.transpose(_lowercase , (0, 3, 1, 2))
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowercase)
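# A hypothetical initialization sketch using the upstream diffusers class this file
# mirrors (the class name and tiny kwargs below are assumptions; assumes `diffusers`
# and `jax` are installed).
if __name__ == "__main__":
    import jax
    from diffusers import FlaxUNet2DConditionModel

    unet = FlaxUNet2DConditionModel(
        sample_size=8,
        block_out_channels=(32, 32, 32, 32),
        cross_attention_dim=32,
        attention_head_dim=8,
    )
    params = unet.init_weights(jax.random.PRNGKey(0))  # builds dummy inputs internally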
| 363 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
UpperCAmelCase_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
UpperCAmelCase_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase)
processor.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , **_lowercase :Dict) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).tokenizer
def __a ( self :Optional[Any] , **_lowercase :Optional[Any]) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).image_processor
def __a ( self :Dict , **_lowercase :Tuple) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).qformer_tokenizer
def __a ( self :Optional[int]) -> str:
shutil.rmtree(self.tmpdirname)
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
self.assertIsInstance(processor.qformer_tokenizer , _lowercase)
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase)
UpperCAmelCase_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
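# A hypothetical end-to-end sketch (checkpoint name is illustrative; assumes network
# access): the processor returns tokenizer and Q-Former encodings plus pixel values,
# exactly the keys the tests above assert on.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import InstructBlipProcessor

    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
    print(sorted(inputs.keys()))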
| 344 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
def __init__( self :List[str] , _lowercase :List[Any] , _lowercase :List[Any]=3 , _lowercase :Any=32 , _lowercase :int=3 , _lowercase :List[Any]=10 , _lowercase :Any=[10, 20, 30, 40] , _lowercase :Tuple=[1, 1, 2, 1] , _lowercase :Tuple=True , _lowercase :Tuple=True , _lowercase :Optional[Any]="relu" , _lowercase :str=3 , _lowercase :Optional[Any]=None , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels)
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def __a ( self :Optional[Any]) -> Optional[int]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __a ( self :Dict , _lowercase :List[Any] , _lowercase :List[str] , _lowercase :Dict) -> Dict:
UpperCAmelCase_ = TFRegNetModel(config=_lowercase)
UpperCAmelCase_ = model(_lowercase , training=_lowercase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self :Union[str, Any] , _lowercase :int , _lowercase :List[str] , _lowercase :int) -> Union[str, Any]:
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = TFRegNetForImageClassification(_lowercase)
UpperCAmelCase_ = model(_lowercase , labels=_lowercase , training=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __a ( self :Tuple) -> Optional[Any]:
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : Any =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ : str =(
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ : Optional[int] =False
UpperCamelCase__ : List[Any] =False
UpperCamelCase__ : str =False
UpperCamelCase__ : List[Any] =False
UpperCamelCase__ : str =False
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = TFRegNetModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase)
def __a ( self :Optional[int]) -> int:
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def __a ( self :Union[str, Any]) -> List[str]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def __a ( self :List[str]) -> int:
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def __a ( self :Union[str, Any]) -> List[Any]:
pass
def __a ( self :Union[str, Any]) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase)
UpperCAmelCase_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase)
def __a ( self :Optional[int]) -> Tuple:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase)
def __a ( self :int) -> Optional[int]:
def check_hidden_states_output(_lowercase :List[str] , _lowercase :Tuple , _lowercase :Tuple):
UpperCAmelCase_ = model_class(_lowercase)
UpperCAmelCase_ = model(**self._prepare_for_class(_lowercase , _lowercase) , training=_lowercase)
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(_lowercase) , expected_num_stages + 1)
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ = layer_type
UpperCAmelCase_ = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase)
def __a ( self :Union[str, Any]) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowercase :Union[str, Any] , _lowercase :str , _lowercase :int , _lowercase :Any={}):
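            # Run the model twice - once as a ModelOutput (return_dict=True) and
            # once as a plain tuple - and compare the two element-wise; nested
            # lists/tuples are walked recursively and None entries are skipped.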
UpperCAmelCase_ = model(_lowercase , return_dict=_lowercase , **_lowercase)
UpperCAmelCase_ = model(_lowercase , return_dict=_lowercase , **_lowercase).to_tuple()
def recursive_check(_lowercase :Dict , _lowercase :Union[str, Any]):
if isinstance(_lowercase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase):
recursive_check(_lowercase , _lowercase)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowercase , _lowercase)) , msg=(
'''Tuple and dict output are not equal. Difference:'''
f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
) , )
recursive_check(_lowercase , _lowercase)
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase)
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase)
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase)
check_equivalence(_lowercase , _lowercase , _lowercase)
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
check_equivalence(_lowercase , _lowercase , _lowercase)
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase)
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase)
check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True})
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True})
def __a ( self :Any) -> int:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase)
@slow
def __a ( self :Optional[int]) -> List[str]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = TFRegNetModel.from_pretrained(_lowercase)
self.assertIsNotNone(_lowercase)
def A ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def __a ( self :Optional[int]) -> Any:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def __a ( self :int) -> Union[str, Any]:
UpperCAmelCase_ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_lowercase , return_tensors='''tf''')
# forward pass
UpperCAmelCase_ = model(**_lowercase , training=_lowercase)
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , _lowercase)
UpperCAmelCase_ = tf.constant([-0.4_180, -1.5_051, -3.4_836])
tf.debugging.assert_near(outputs.logits[0, :3] , _lowercase , atol=1E-4)
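

# --- Illustrative usage sketch, not part of the test suite ---
# A minimal, hedged version of the inference check above outside unittest; the
# checkpoint name below is an assumption and the calls require network access.
if __name__ == "__main__":
    import tensorflow as tf
    from PIL import Image
    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs, training=False).logits
    print(logits.shape, int(tf.argmax(logits, axis=-1)[0]))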
| 364 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] ="levit"
def __init__( self :List[str] , _lowercase :List[Any]=224 , _lowercase :str=3 , _lowercase :Optional[int]=3 , _lowercase :str=2 , _lowercase :List[Any]=1 , _lowercase :str=16 , _lowercase :Dict=[128, 256, 384] , _lowercase :Union[str, Any]=[4, 8, 12] , _lowercase :Tuple=[4, 4, 4] , _lowercase :Dict=[16, 16, 16] , _lowercase :Any=0 , _lowercase :Dict=[2, 2, 2] , _lowercase :Any=[2, 2, 2] , _lowercase :Tuple=0.02 , **_lowercase :Union[str, Any] , ) -> Optional[Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] =version.parse("1.11" )
@property
def __a ( self :Any) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def __a ( self :List[Any]) -> float:
return 1E-4
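

# Minimal sketch of how an OnnxConfig like the one above is consumed, assuming
# the classes are exposed as LevitConfig / LevitOnnxConfig (names taken from
# upstream transformers); this block is illustrative, not part of the module:
if __name__ == "__main__":
    from transformers import LevitConfig
    from transformers.models.levit.configuration_levit import LevitOnnxConfig

    onnx_config = LevitOnnxConfig(LevitConfig())
    print(onnx_config.inputs)               # dynamic axes declared for pixel_values
    print(onnx_config.atol_for_validation)  # 1e-4, matching the property above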
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=None , __UpperCAmelCase="no" , __UpperCAmelCase="29500" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCAmelCase_ = True
elif "IPython" in sys.modules:
UpperCAmelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCAmelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
                '''your training function. Restart your notebook and make sure no cell initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
UpperCAmelCase_ = 8
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''TPU''' )
print(f"Launching a training on {num_processes} TPU cores." )
xmp.spawn(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*__UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
                    '''inside your training function. Restart your notebook and make sure no cell initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to
            # each process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port=__UpperCAmelCase , mixed_precision=__UpperCAmelCase ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''MULTI_GPU''' )
print(f"Launching training on {num_processes} GPUs." )
try:
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCAmelCase_ = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*__UpperCAmelCase )
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=2 ) -> Optional[Any]:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to
        # each process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , debug=__UpperCAmelCase )
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
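

# Usage sketch for the notebook launcher defined above, assuming it is exposed
# as `accelerate.notebook_launcher` (its upstream name); the training function
# here is a toy stand-in:
if __name__ == "__main__":
    from accelerate import notebook_launcher

    def _toy_train(num_steps: int = 3) -> None:
        # A real function would build an Accelerator and a training loop here.
        for step in range(num_steps):
            print(f"step {step}")

    notebook_launcher(_toy_train, args=(2,), num_processes=1)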
| 344 | 0 |
import sys
def A ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = len(__UpperCAmelCase )
UpperCAmelCase_ = [[0 for x in range(__UpperCAmelCase )] for x in range(__UpperCAmelCase )]
UpperCAmelCase_ = [[0 for x in range(__UpperCAmelCase )] for x in range(__UpperCAmelCase )]
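    # matrix[a][b] will hold the minimum number of scalar multiplications needed
    # to compute the product A_a ... A_b (array holds the chain dimensions);
    # sol[a][b] records the split point c that achieves it, so the optimal
    # parenthesization can be reconstructed afterwards.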
for chain_length in range(2 , __UpperCAmelCase ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ = a + chain_length - 1
UpperCAmelCase_ = sys.maxsize
for c in range(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ = cost
UpperCAmelCase_ = c
return matrix, sol
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
if i == j:
print('''A''' + str(__UpperCAmelCase ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optiomal_solution(__UpperCAmelCase , __UpperCAmelCase , optimal_solution[i][j] )
print_optiomal_solution(__UpperCAmelCase , optimal_solution[i][j] + 1 , __UpperCAmelCase )
print(''')''' , end=''' ''' )
def A ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = [30, 35, 15, 5, 10, 20, 25]
UpperCAmelCase_ = len(__UpperCAmelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCAmelCase_ , UpperCAmelCase_ = matrix_chain_order(__UpperCAmelCase )
    print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(__UpperCAmelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
| 366 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( _snake_case ):
def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any) -> int:
self.run_seqaseq_quick(distributed=_lowercase)
@require_torch_multi_gpu
def __a ( self :int) -> Any:
self.run_seqaseq_quick(distributed=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
        # XXX: apex breaks the trainer if run_seq2seq.main() is run twice from the same
        # program, and it breaks other tests that run from the same pytest worker; therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want a cpu variant we will need a dedicated test
        #
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call it botches the subsequent eval.
        #
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def __a ( self :str , _lowercase :Any) -> List[str]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''])
UpperCAmelCase_ = len(re.findall(_lowercase , cl.err))
self.assertEqual(_lowercase , data['''n_matches'''])
@slow
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(_lowercase)
UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :List[str]) -> str:
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(_lowercase :str) -> Tuple[int, int, float]:
UpperCAmelCase_ = '''--skip_memory_metrics 0'''
UpperCAmelCase_ = self.run_trainer(
max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
UpperCAmelCase_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights
        # that don't get quantized and remain in fp32. Therefore only the remaining 25M parameters are
        # quantized to 2 bytes, and the diff in optimizer memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
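        # Worked numbers behind that margin: plain AdamW keeps two fp32 optimizer
        # states per param, 25M * 8 bytes ~= 200MB; 8-bit BNB stores them in
        # 2 bytes, 25M * 2 ~= 50MB; 120MB thus leaves ~30MB of slack below the
        # ~150MB theoretical saving.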
UpperCAmelCase_ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
_lowercase , _lowercase , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def __a ( self :Any , _lowercase :int , _lowercase :str , _lowercase :int , _lowercase :float = 3E-3 , _lowercase :str = "adafactor" , _lowercase :bool = False , _lowercase :str = None , _lowercase :int = 0 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :int = None , ) -> List[Any]:
UpperCAmelCase_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowercase)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowercase)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
UpperCAmelCase_ = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowercase)}\n ".split()
UpperCAmelCase_ = '''
--do_predict
'''.split()
UpperCAmelCase_ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase_ = get_gpu_count()
UpperCAmelCase_ = get_torch_dist_unique_port()
UpperCAmelCase_ = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
UpperCAmelCase_ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowercase , env=self.get_env())
else:
UpperCAmelCase_ = ['''run_translation.py'''] + args
with patch.object(_lowercase , '''argv''' , _lowercase):
main()
return output_dir
| 344 | 0 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase = get_tests_dir("fixtures/dummy-config.json")
class a_ ( unittest.TestCase ):
def __a ( self :Tuple) -> str:
UpperCAmelCase_ = 0
def __a ( self :Union[str, Any]) -> Optional[int]:
self.assertIsNotNone(transformers.models.auto.__spec__)
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto'''))
def __a ( self :str) -> int:
UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base-uncased''')
self.assertIsInstance(_lowercase , _lowercase)
def __a ( self :List[str]) -> Optional[Any]:
UpperCAmelCase_ = AutoConfig.from_pretrained(_lowercase)
self.assertIsInstance(_lowercase , _lowercase)
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = AutoConfig.from_pretrained(_lowercase)
self.assertIsInstance(_lowercase , _lowercase)
def __a ( self :Optional[int]) -> Tuple:
UpperCAmelCase_ = AutoConfig.for_model('''roberta''')
self.assertIsInstance(_lowercase , _lowercase)
def __a ( self :Any) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
UpperCAmelCase_ = os.path.join(_lowercase , '''fake-roberta''')
os.makedirs(_lowercase , exist_ok=_lowercase)
with open(os.path.join(_lowercase , '''config.json''') , '''w''') as f:
f.write(json.dumps({}))
UpperCAmelCase_ = AutoConfig.from_pretrained(_lowercase)
self.assertEqual(type(_lowercase) , _lowercase)
def __a ( self :Any) -> Any:
try:
AutoConfig.register('''custom''' , _lowercase)
# Wrong model type will raise an error
with self.assertRaises(_lowercase):
AutoConfig.register('''model''' , _lowercase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase):
AutoConfig.register('''bert''' , _lowercase)
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase_ = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowercase)
UpperCAmelCase_ = AutoConfig.from_pretrained(_lowercase)
self.assertIsInstance(_lowercase , _lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __a ( self :Any) -> Optional[Any]:
with self.assertRaisesRegex(
_lowercase , '''bert-base is not a local folder and is not a valid model identifier'''):
UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base''')
def __a ( self :Optional[int]) -> int:
with self.assertRaisesRegex(
_lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
UpperCAmelCase_ = AutoConfig.from_pretrained(_lowercase , revision='''aaaaaa''')
def __a ( self :str) -> Optional[int]:
with self.assertRaisesRegex(
_lowercase , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
UpperCAmelCase_ = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''')
def __a ( self :Tuple) -> Dict:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowercase):
UpperCAmelCase_ = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase):
UpperCAmelCase_ = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowercase)
UpperCAmelCase_ = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowercase)
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''')
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowercase)
UpperCAmelCase_ = AutoConfig.from_pretrained(_lowercase , trust_remote_code=_lowercase)
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''')
def __a ( self :Any) -> Optional[Any]:
class a_ ( _snake_case ):
UpperCamelCase__ : Any ="new-model"
try:
AutoConfig.register('''new-model''' , _lowercase)
# If remote code is not set, the default is to use local
UpperCAmelCase_ = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''')
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''')
# If remote code is disabled, we load the local one.
UpperCAmelCase_ = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowercase)
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''')
# If remote is enabled, we load from the Hub
UpperCAmelCase_ = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowercase)
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''')
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
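

# Standalone sketch of the register/round-trip pattern exercised above; the
# class and model-type names here are made up for illustration:
if __name__ == "__main__":
    import tempfile
    from transformers import AutoConfig, PretrainedConfig

    class ToyConfig(PretrainedConfig):
        model_type = "toy-model"

    AutoConfig.register("toy-model", ToyConfig)
    with tempfile.TemporaryDirectory() as tmp_dir:
        ToyConfig().save_pretrained(tmp_dir)
        assert isinstance(AutoConfig.from_pretrained(tmp_dir), ToyConfig)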
| 367 |
import functools
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
UpperCAmelCase_ = len(__UpperCAmelCase )
UpperCAmelCase_ = len(__UpperCAmelCase )
@functools.cache
def min_distance(__UpperCAmelCase , __UpperCAmelCase ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase_ = int(worda[indexa] != worda[indexa] ) # current letters not identical
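        # Recurrence over suffixes: delete the current letter from one word
        # (1 + f(i+1, j)), delete it from the other (1 + f(i, j+1)), or advance
        # both paying `diff` for a substitution (0 when the letters match);
        # @functools.cache memoizes the O(n*m) distinct states.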
return min(
1 + min_distance(indexa + 1 , __UpperCAmelCase ) , 1 + min_distance(__UpperCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
UpperCamelCase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Any ="left"
def __init__( self :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=False , _lowercase :Tuple="<s>" , _lowercase :Any="</s>" , _lowercase :Dict="<unk>" , _lowercase :str="<sep>" , _lowercase :Tuple="<pad>" , _lowercase :Any="<cls>" , _lowercase :List[str]="<mask>" , _lowercase :Union[str, Any]=["<eop>", "<eod>"] , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Union[str, Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
UpperCAmelCase_ = 3
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
@property
def __a ( self :int) -> List[Any]:
return len(self.sp_model)
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Optional[Any]) -> List[Any]:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __a ( self :List[str] , _lowercase :Tuple) -> Optional[int]:
if self.remove_space:
UpperCAmelCase_ = ''' '''.join(inputs.strip().split())
else:
UpperCAmelCase_ = inputs
UpperCAmelCase_ = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCAmelCase_ = unicodedata.normalize('''NFKD''' , _lowercase)
UpperCAmelCase_ = ''''''.join([c for c in outputs if not unicodedata.combining(_lowercase)])
if self.do_lower_case:
UpperCAmelCase_ = outputs.lower()
return outputs
def __a ( self :str , _lowercase :str) -> List[str]:
UpperCAmelCase_ = self.preprocess_text(_lowercase)
UpperCAmelCase_ = self.sp_model.encode(_lowercase , out_type=_lowercase)
UpperCAmelCase_ = []
for piece in pieces:
if len(_lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCAmelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCAmelCase_ = cur_pieces[1:]
else:
UpperCAmelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowercase)
else:
new_pieces.append(_lowercase)
return new_pieces
def __a ( self :Optional[Any] , _lowercase :Union[str, Any]) -> Tuple:
return self.sp_model.PieceToId(_lowercase)
def __a ( self :Optional[int] , _lowercase :Optional[Any]) -> List[str]:
return self.sp_model.IdToPiece(_lowercase)
def __a ( self :List[Any] , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = ''''''.join(_lowercase).replace(_lowercase , ''' ''').strip()
return out_string
def __a ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :bool = False , _lowercase :bool = None , _lowercase :bool = True , **_lowercase :Tuple , ) -> str:
UpperCAmelCase_ = kwargs.pop('''use_source_tokenizer''' , _lowercase)
UpperCAmelCase_ = self.convert_ids_to_tokens(_lowercase , skip_special_tokens=_lowercase)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
UpperCAmelCase_ = []
sub_texts.append(_lowercase)
else:
current_sub_text.append(_lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCAmelCase_ = ''''''.join(_lowercase)
UpperCAmelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ = self.clean_up_tokenization(_lowercase)
return clean_text
else:
return text
def __a ( self :str , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self :Dict , _lowercase :List[int] , _lowercase :Optional[List[int]] = None , _lowercase :bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase)
if token_ids_a is not None:
return ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) + [1, 1]
return ([0] * len(_lowercase)) + [1, 1]
def __a ( self :Optional[int] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def __a ( self :str , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_lowercase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
UpperCAmelCase_ = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
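

# Minimal usage sketch, assuming the class above is exposed as
# transformers.XLNetTokenizer (checkpoint name taken from the vocab map above);
# requires the `sentencepiece` package and network access:
if __name__ == "__main__":
    from transformers import XLNetTokenizer

    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    encoded = tokenizer("Hello world")
    # XLNet appends <sep><cls> at the end of the sequence and pads on the left.
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))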
| 344 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case ):
UpperCamelCase__ : int ="encoder-decoder"
UpperCamelCase__ : List[Any] =True
def __init__( self :str , **_lowercase :Any) -> List[Any]:
super().__init__(**_lowercase)
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCAmelCase_ = kwargs.pop('''encoder''')
UpperCAmelCase_ = encoder_config.pop('''model_type''')
UpperCAmelCase_ = kwargs.pop('''decoder''')
UpperCAmelCase_ = decoder_config.pop('''model_type''')
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ = AutoConfig.for_model(_lowercase , **_lowercase)
UpperCAmelCase_ = AutoConfig.for_model(_lowercase , **_lowercase)
UpperCAmelCase_ = True
@classmethod
def __a ( cls :int , _lowercase :PretrainedConfig , _lowercase :PretrainedConfig , **_lowercase :str) -> PretrainedConfig:
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
UpperCAmelCase_ = True
UpperCAmelCase_ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowercase)
def __a ( self :List[Any]) -> int:
UpperCAmelCase_ = copy.deepcopy(self.__dict__)
UpperCAmelCase_ = self.encoder.to_dict()
UpperCAmelCase_ = self.decoder.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
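

# Usage sketch for the composite config above, assuming it is exposed as
# transformers.EncoderDecoderConfig (the classmethod above corresponds to
# `from_encoder_decoder_configs` upstream):
if __name__ == "__main__":
    from transformers import BertConfig, EncoderDecoderConfig

    encoder = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
    decoder = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
    config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
    assert config.decoder.is_decoder and config.decoder.add_cross_attention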
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case , _snake_case ):
UpperCamelCase__ : Union[str, Any] ="maskformer-swin"
UpperCamelCase__ : List[str] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :Union[str, Any] , _lowercase :Optional[int]=224 , _lowercase :List[str]=4 , _lowercase :Tuple=3 , _lowercase :List[Any]=96 , _lowercase :Any=[2, 2, 6, 2] , _lowercase :int=[3, 6, 12, 24] , _lowercase :List[Any]=7 , _lowercase :Dict=4.0 , _lowercase :Any=True , _lowercase :int=0.0 , _lowercase :List[Any]=0.0 , _lowercase :Tuple=0.1 , _lowercase :str="gelu" , _lowercase :Union[str, Any]=False , _lowercase :Tuple=0.02 , _lowercase :List[str]=1E-5 , _lowercase :List[str]=None , _lowercase :Any=None , **_lowercase :Any , ) -> Union[str, Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(_lowercase) - 1))
UpperCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_lowercase) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names)
| 344 | 0 |
import math
def A ( __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 ) -> list:
'''simple docstring'''
UpperCAmelCase_ = end or len(__UpperCAmelCase )
for i in range(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_ = i
UpperCAmelCase_ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
UpperCAmelCase_ = array[temp_index - 1]
temp_index -= 1
UpperCAmelCase_ = temp_index_value
return array
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None: # Max Heap
'''simple docstring'''
UpperCAmelCase_ = index
UpperCAmelCase_ = 2 * index + 1 # Left Node
UpperCAmelCase_ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
UpperCAmelCase_ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
UpperCAmelCase_ = right_index
if largest != index:
UpperCAmelCase_ , UpperCAmelCase_ = array[largest], array[index]
heapify(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def A ( __UpperCAmelCase ) -> list:
'''simple docstring'''
UpperCAmelCase_ = len(__UpperCAmelCase )
for i in range(n // 2 , -1 , -1 ):
heapify(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for i in range(n - 1 , 0 , -1 ):
UpperCAmelCase_ , UpperCAmelCase_ = array[0], array[i]
heapify(__UpperCAmelCase , 0 , __UpperCAmelCase )
return array
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
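    # Median-of-three via a comparison trick: array[first_index] is the median
    # exactly when it is greater than one (but not both) of the other two
    # values, i.e. when the two comparisons below disagree.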
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
UpperCAmelCase_ = low
UpperCAmelCase_ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
UpperCAmelCase_ , UpperCAmelCase_ = array[j], array[i]
i += 1
def A ( __UpperCAmelCase ) -> list:
'''simple docstring'''
if len(__UpperCAmelCase ) == 0:
return array
UpperCAmelCase_ = 2 * math.ceil(math.loga(len(__UpperCAmelCase ) ) )
UpperCAmelCase_ = 16
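    # Introsort: quicksort while the recursion depth stays within ~2*log2(n),
    # fall back to heap sort beyond that depth, and finish runs shorter than
    # the 16-element threshold with insertion sort.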
return intro_sort(__UpperCAmelCase , 0 , len(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list:
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(__UpperCAmelCase )
max_depth -= 1
UpperCAmelCase_ = median_of_a(__UpperCAmelCase , __UpperCAmelCase , start + ((end - start) // 2) + 1 , end - 1 )
UpperCAmelCase_ = partition(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
intro_sort(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_ = p
return insertion_sort(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase_ = input("Enter numbers separated by a comma : ").strip()
UpperCamelCase_ = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 370 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase_ = False
UpperCamelCase_ = False
def A ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
return TrainCommand(__UpperCAmelCase )
class a_ ( _snake_case ):
@staticmethod
def __a ( _lowercase :ArgumentParser) -> List[Any]:
UpperCAmelCase_ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
train_parser.add_argument(
'''--train_data''' , type=_lowercase , required=_lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''')
train_parser.add_argument(
'''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''')
train_parser.add_argument(
'''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''')
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''')
train_parser.add_argument(
'''--validation_split''' , type=_lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path to saved the trained model.''')
train_parser.add_argument(
'''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''')
train_parser.add_argument(
'''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=32 , help='''Batch size for training.''')
train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=64 , help='''Batch size for validation.''')
train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3E-5 , help='''Learning rate.''')
train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1E-0_8 , help='''Epsilon for Adam optimizer.''')
train_parser.set_defaults(func=_lowercase)
def __init__( self :Union[str, Any] , _lowercase :Namespace) -> Union[str, Any]:
UpperCAmelCase_ = logging.get_logger('''transformers-cli/training''')
UpperCAmelCase_ = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_lowercase)
UpperCAmelCase_ = args.output
UpperCAmelCase_ = args.column_label
UpperCAmelCase_ = args.column_text
UpperCAmelCase_ = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
UpperCAmelCase_ = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = args.validation_split
UpperCAmelCase_ = args.train_batch_size
UpperCAmelCase_ = args.valid_batch_size
UpperCAmelCase_ = args.learning_rate
UpperCAmelCase_ = args.adam_epsilon
def __a ( self :int) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :Optional[Any]) -> Any:
raise NotImplementedError
def __a ( self :int) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
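

# Example invocation (flags from the parser above; the csv path is a placeholder):
#   transformers-cli train --train_data ./data/train.csv --task text_classification \
#       --model bert-base-uncased --output ./trained_model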
| 344 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def A ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = SwinvaConfig()
UpperCAmelCase_ = swinva_name.split('''_''' )
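    # timm names look like "swinv2_{size}_patch4_window{W}_{img}"; fine-tuned
    # variants embed "to" spans (e.g. "window12to16", "192to256"), which the
    # branches below strip to recover window size and input resolution.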
UpperCAmelCase_ = name_split[1]
if "to" in name_split[3]:
UpperCAmelCase_ = int(name_split[3][-3:] )
else:
UpperCAmelCase_ = int(name_split[3] )
if "to" in name_split[2]:
UpperCAmelCase_ = int(name_split[2][-2:] )
else:
UpperCAmelCase_ = int(name_split[2][6:] )
if model_size == "tiny":
UpperCAmelCase_ = 96
UpperCAmelCase_ = (2, 2, 6, 2)
UpperCAmelCase_ = (3, 6, 12, 24)
elif model_size == "small":
UpperCAmelCase_ = 96
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (3, 6, 12, 24)
elif model_size == "base":
UpperCAmelCase_ = 128
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (4, 8, 16, 32)
else:
UpperCAmelCase_ = 192
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (6, 12, 24, 48)
if "to" in swinva_name:
UpperCAmelCase_ = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
UpperCAmelCase_ = 2_1841
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = '''imagenet-22k-id2label.json'''
UpperCAmelCase_ = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
else:
UpperCAmelCase_ = 1000
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = img_size
UpperCAmelCase_ = num_classes
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
return config
def A ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if "patch_embed.proj" in name:
UpperCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCAmelCase_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
UpperCAmelCase_ = '''encoder.''' + name
if "attn.proj" in name:
UpperCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase_ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
UpperCAmelCase_ = name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
UpperCAmelCase_ = name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
UpperCAmelCase_ = name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
UpperCAmelCase_ = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if name == "norm.weight":
UpperCAmelCase_ = '''layernorm.weight'''
if name == "norm.bias":
UpperCAmelCase_ = '''layernorm.bias'''
if "head" in name:
UpperCAmelCase_ = name.replace('''head''' , '''classifier''' )
else:
UpperCAmelCase_ = '''swinv2.''' + name
return name
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(__UpperCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
UpperCAmelCase_ = key.split('''.''' )
UpperCAmelCase_ = int(key_split[1] )
UpperCAmelCase_ = int(key_split[3] )
UpperCAmelCase_ = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[
dim : dim * 2
]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = val
return orig_state_dict
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase )
timm_model.eval()
UpperCAmelCase_ = get_swinva_config(__UpperCAmelCase )
UpperCAmelCase_ = SwinvaForImageClassification(__UpperCAmelCase )
model.eval()
UpperCAmelCase_ = convert_state_dict(timm_model.state_dict() , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase_ = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) )
UpperCAmelCase_ = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
UpperCAmelCase_ = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' )
UpperCAmelCase_ = timm_model(inputs['''pixel_values'''] )
UpperCAmelCase_ = model(**__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCAmelCase )
model.push_to_hub(
repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization='''nandwalritik''' , commit_message='''Add model''' , )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase_ = parser.parse_args()
    convert_swinva_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
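# Example invocation (a sketch; the script filename is an assumption, the flag
# names come from the parser above):
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256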
| 371 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def __a ( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self :List[Any] , _lowercase :Tuple , _lowercase :List[str]=False) -> int:
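        # mirrors the processor's resizing: scale the shorter side to `size`,
        # cap the longer side at int(1333 / 800 * size), then round both sides
        # down to a multiple of `size_divisor`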
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(_lowercase , _lowercase)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(_lowercase , _lowercase) > max_size:
UpperCAmelCase_ = max_size / max(_lowercase , _lowercase)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
            UpperCAmelCase_ = max(_lowercase , key=lambda item: item[0])[0]
            UpperCAmelCase_ = max(_lowercase , key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ :
def __init__( self :Tuple , _lowercase :Dict , _lowercase :Union[str, Any]=13 , _lowercase :Optional[Any]=7 , _lowercase :List[Any]=True , _lowercase :Dict=True , _lowercase :Optional[Any]=True , _lowercase :str=True , _lowercase :str=99 , _lowercase :Optional[Any]=16 , _lowercase :List[Any]=36 , _lowercase :Dict=6 , _lowercase :Tuple=6 , _lowercase :Optional[int]=6 , _lowercase :int=37 , _lowercase :str="gelu" , _lowercase :List[Any]=0.1 , _lowercase :int=0.1 , _lowercase :Any=512 , _lowercase :str=16 , _lowercase :Optional[Any]=2 , _lowercase :str=0.02 , _lowercase :Tuple=3 , _lowercase :List[Any]=4 , _lowercase :str=None , ) -> List[str]:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_hidden_groups
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def __a ( self :int) -> Optional[Any]:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self :int) -> Optional[Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __a ( self :Optional[int] , _lowercase :Tuple , _lowercase :List[Any] , _lowercase :List[Any] , _lowercase :Tuple , _lowercase :Optional[int] , _lowercase :Dict , _lowercase :List[Any]) -> List[Any]:
UpperCAmelCase_ = AlbertModel(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase)
UpperCAmelCase_ = model(_lowercase , token_type_ids=_lowercase)
UpperCAmelCase_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def __a ( self :Any , _lowercase :str , _lowercase :str , _lowercase :str , _lowercase :List[str] , _lowercase :str , _lowercase :Any , _lowercase :Optional[int]) -> Any:
UpperCAmelCase_ = AlbertForPreTraining(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , sentence_order_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels))
def __a ( self :Union[str, Any] , _lowercase :List[Any] , _lowercase :Union[str, Any] , _lowercase :str , _lowercase :Dict , _lowercase :int , _lowercase :Tuple , _lowercase :Dict) -> Any:
UpperCAmelCase_ = AlbertForMaskedLM(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __a ( self :Dict , _lowercase :Union[str, Any] , _lowercase :Dict , _lowercase :List[Any] , _lowercase :List[str] , _lowercase :int , _lowercase :int , _lowercase :str) -> str:
UpperCAmelCase_ = AlbertForQuestionAnswering(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __a ( self :Tuple , _lowercase :List[str] , _lowercase :Any , _lowercase :Dict , _lowercase :List[Any] , _lowercase :Optional[Any] , _lowercase :Dict , _lowercase :Tuple) -> int:
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = AlbertForSequenceClassification(_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __a ( self :Union[str, Any] , _lowercase :Optional[int] , _lowercase :str , _lowercase :Optional[int] , _lowercase :int , _lowercase :List[Any] , _lowercase :List[Any] , _lowercase :List[str]) -> Any:
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = AlbertForTokenClassification(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __a ( self :Union[str, Any] , _lowercase :Optional[Any] , _lowercase :Tuple , _lowercase :List[str] , _lowercase :Union[str, Any] , _lowercase :List[str] , _lowercase :int , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = AlbertForMultipleChoice(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a_ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ : Tuple =(
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : List[str] =True
def __a ( self :Optional[int] , _lowercase :str , _lowercase :Tuple , _lowercase :Dict=False) -> Dict:
UpperCAmelCase_ = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
if return_labels:
if model_class in get_values(_lowercase):
UpperCAmelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase)
UpperCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase)
return inputs_dict
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = AlbertModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_lowercase , hidden_size=37)
def __a ( self :int) -> Optional[Any]:
self.config_tester.run_common_tests()
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase)
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowercase)
def __a ( self :Dict) -> List[str]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase)
def __a ( self :Optional[int]) -> Tuple:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase)
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase)
def __a ( self :List[str]) -> Union[str, Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase)
def __a ( self :Optional[int]) -> int:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_lowercase)
@slow
def __a ( self :Optional[Any]) -> Dict:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AlbertModel.from_pretrained(_lowercase)
self.assertIsNotNone(_lowercase)
@require_torch
class a_ ( unittest.TestCase ):
@slow
def __a ( self :str) -> List[str]:
UpperCAmelCase_ = AlbertModel.from_pretrained('''albert-base-v2''')
UpperCAmelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
UpperCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
UpperCAmelCase_ = model(_lowercase , attention_mask=_lowercase)[0]
UpperCAmelCase_ = torch.Size((1, 11, 768))
self.assertEqual(output.shape , _lowercase)
UpperCAmelCase_ = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4))
| 350 |
def A ( __UpperCAmelCase = 100_0000 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = [i - 1 for i in range(limit + 1 )]
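    # totient sieve: phi[i] is still i - 1 exactly when i is prime; for each
    # prime i, phi[j] -= phi[j] // i strips the multiples-of-i share from every
    # j divisible by i, leaving phi[j] equal to Euler's totient of j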
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , __UpperCAmelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
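# Quick check: for limit == 8 this returns 1 + 2 + 2 + 4 + 2 + 6 + 4 == 21,
# the number of reduced proper fractions with denominator <= 8.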
if __name__ == "__main__":
print(solution())
| 344 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :int , _lowercase :Union[str, Any] , _lowercase :str=7 , _lowercase :str=3 , _lowercase :List[str]=18 , _lowercase :List[str]=30 , _lowercase :List[Any]=400 , _lowercase :Optional[Any]=True , _lowercase :Dict=None , _lowercase :str=True , _lowercase :str=None , _lowercase :List[str]=True , _lowercase :Any=[0.5, 0.5, 0.5] , _lowercase :int=[0.5, 0.5, 0.5] , _lowercase :Dict=False , ) -> Optional[Any]:
UpperCAmelCase_ = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase_ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_reduce_labels
def __a ( self :Dict) -> Optional[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def A ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase_ = Image.open(dataset[0]['''file'''] )
UpperCAmelCase_ = Image.open(dataset[1]['''file'''] )
return image, map
def A ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase_ = Image.open(ds[0]['''file'''] )
UpperCAmelCase_ = Image.open(ds[1]['''file'''] )
UpperCAmelCase_ = Image.open(ds[2]['''file'''] )
UpperCAmelCase_ = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] =BeitImageProcessor if is_vision_available() else None
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = BeitImageProcessingTester(self)
@property
def __a ( self :str) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :List[Any]) -> List[str]:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''do_center_crop'''))
self.assertTrue(hasattr(_lowercase , '''center_crop'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
def __a ( self :Tuple) -> Dict:
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20})
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
self.assertEqual(image_processor.do_reduce_labels , _lowercase)
UpperCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowercase)
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
self.assertEqual(image_processor.do_reduce_labels , _lowercase)
def __a ( self :int) -> List[str]:
pass
def __a ( self :Union[str, Any]) -> Any:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __a ( self :int) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __a ( self :Optional[int]) -> Tuple:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __a ( self :Tuple) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
UpperCAmelCase_ = []
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , _lowercase , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test not batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ = prepare_semantic_single_inputs()
UpperCAmelCase_ = image_processing(_lowercase , _lowercase , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched input (PIL images)
UpperCAmelCase_ , UpperCAmelCase_ = prepare_semantic_batch_inputs()
UpperCAmelCase_ = image_processing(_lowercase , _lowercase , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
def __a ( self :Union[str, Any]) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCAmelCase_ , UpperCAmelCase_ = prepare_semantic_single_inputs()
UpperCAmelCase_ = image_processing(_lowercase , _lowercase , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 150)
UpperCAmelCase_ = True
UpperCAmelCase_ = image_processing(_lowercase , _lowercase , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
| 351 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =(PNDMScheduler,)
UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),)
def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :int , **_lowercase :str) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
        # an earlier version of set_timesteps() caused an indexing error on alphas when the number of inference steps was a power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
        # We specify a different beta start so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
        # We specify a different beta start so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
| 344 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class a_ ( _snake_case ):
UpperCamelCase__ : Any ="distilbert"
UpperCamelCase__ : int ={
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self :int , _lowercase :Tuple=30522 , _lowercase :Tuple=512 , _lowercase :Optional[Any]=False , _lowercase :int=6 , _lowercase :int=12 , _lowercase :Tuple=768 , _lowercase :Dict=4 * 768 , _lowercase :Tuple=0.1 , _lowercase :Tuple=0.1 , _lowercase :Dict="gelu" , _lowercase :List[Any]=0.02 , _lowercase :str=0.1 , _lowercase :Optional[Any]=0.2 , _lowercase :Tuple=0 , **_lowercase :Union[str, Any] , ) -> List[Any]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = sinusoidal_pos_embds
UpperCAmelCase_ = n_layers
UpperCAmelCase_ = n_heads
UpperCAmelCase_ = dim
UpperCAmelCase_ = hidden_dim
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = qa_dropout
UpperCAmelCase_ = seq_classif_dropout
super().__init__(**_lowercase , pad_token_id=_lowercase)
class a_ ( _snake_case ):
@property
def __a ( self :Union[str, Any]) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
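# Usage sketch (hedged; assumes the upstream names DistilBertConfig and
# DistilBertOnnxConfig for the two classes above):
#   onnx_config = DistilBertOnnxConfig(DistilBertConfig())
#   onnx_config.inputs  # OrderedDict mapping input names to dynamic-axis dicts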
| 352 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class a_ :
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="dict"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Audio" , init=_snake_case , repr=_snake_case )
def __call__( self :List[Any]) -> List[Any]:
return self.pa_type
def __a ( self :Any , _lowercase :Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(_lowercase , _lowercase):
return {"bytes": None, "path": value}
elif isinstance(_lowercase , _lowercase):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(_lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
                    # converting raw PCM bytes to WAV requires knowing the sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
                    # PCM bytes are already available, so reuse them instead of re-reading the file
UpperCAmelCase_ = np.frombuffer(value['''bytes'''] , dtype=np.intaa).astype(np.floataa) / 32767
else:
UpperCAmelCase_ = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.floataa) / 32767
UpperCAmelCase_ = BytesIO(bytes())
sf.write(_lowercase , _lowercase , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self :Dict , _lowercase :dict , _lowercase :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
UpperCAmelCase_ , UpperCAmelCase_ = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
UpperCAmelCase_ = xsplitext(_lowercase)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split('''::''')[-1]
try:
UpperCAmelCase_ = string_to_dict(_lowercase , config.HUB_DATASETS_URL)['''repo_id''']
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(_lowercase , '''rb''' , use_auth_token=_lowercase) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(_lowercase)
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(_lowercase , orig_sr=_lowercase , target_sr=self.sampling_rate)
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def __a ( self :int , _lowercase :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
UpperCAmelCase_ = pa.array([Audio().encode_example(_lowercase) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(_lowercase , self.pa_type)
def __a ( self :Any , _lowercase :pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(_lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(_lowercase , self.pa_type)
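# Usage sketch (hedged; method names assume the upstream `datasets.Audio`
# feature that this dataclass mirrors):
#   feature = Audio()
#   enc = feature.encode_example("clip.wav")   # {"bytes": None, "path": "clip.wav"}
#   dec = feature.decode_example(enc)          # {"path", "array", "sampling_rate"}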
| 344 | 0 |
UpperCamelCase_ = {}
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCAmelCase_ = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCAmelCase_ = _calculate(days - 1 , __UpperCAmelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCAmelCase_ = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCAmelCase_ = _calculate(days - 1 , __UpperCAmelCase , 0 )
UpperCAmelCase_ = state_late + state_absent + state_ontime
UpperCAmelCase_ = prizestrings
return prizestrings
def A ( __UpperCAmelCase = 30 ) -> int:
'''simple docstring'''
return _calculate(__UpperCAmelCase , absent=0 , late=0 )
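# Known small case: over a 4-day period, 43 of the 3**4 == 81 possible
# attendance strings earn a prize, i.e. _calculate(4, 0, 0) == 43.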
if __name__ == "__main__":
    print(solution())
| 353 |
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=_snake_case ):
UpperCamelCase__ : Any =["torch", "scipy"]
def __init__( self :List[str] , *_lowercase :List[str] , **_lowercase :Union[str, Any]) -> List[Any]:
requires_backends(self , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Dict , *_lowercase :Any , **_lowercase :Dict) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Optional[Any] , *_lowercase :str , **_lowercase :Optional[Any]) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
| 344 | 0 |
import math
class a_ :
def __init__( self :Dict , _lowercase :Union[str, Any]=0) -> Union[str, Any]: # a graph with Node 0,1,...,N-1
UpperCAmelCase_ = n
UpperCAmelCase_ = [
[math.inf for j in range(0 , _lowercase)] for i in range(0 , _lowercase)
] # adjacency matrix for weight
UpperCAmelCase_ = [
[math.inf for j in range(0 , _lowercase)] for i in range(0 , _lowercase)
] # dp[i][j] stores minimum distance from i to j
def __a ( self :Any , _lowercase :List[str] , _lowercase :List[str] , _lowercase :Optional[int]) -> Any:
UpperCAmelCase_ = w
def __a ( self :str) -> Optional[Any]:
for k in range(0 , self.n):
for i in range(0 , self.n):
for j in range(0 , self.n):
UpperCAmelCase_ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
def __a ( self :int , _lowercase :Tuple , _lowercase :Optional[int]) -> List[str]:
return self.dp[u][v]
if __name__ == "__main__":
UpperCamelCase_ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    # show_min() returns the distance, so print it to make the demo visible
    print(graph.show_min(1, 4))  # shortest 1 -> 4 path costs 11 in this graph
    print(graph.show_min(0, 3))  # shortest 0 -> 3 path costs 16 in this graph
| 354 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def A ( __UpperCAmelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
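    # build per-residue-type lookup tables translating between the compact
    # 14-atom representation and the full 37-atom representation, then gather
    # them per residue through the protein's "aatype" indices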
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for rt in rc.restypes:
UpperCAmelCase_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCAmelCase_ = {name: i for i, name in enumerate(__UpperCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = torch.tensor(
__UpperCAmelCase , dtype=torch.floataa , device=protein['''aatype'''].device , )
UpperCAmelCase_ = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
UpperCAmelCase_ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCAmelCase_ = rc.restype_atoa[restype_letter]
UpperCAmelCase_ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCAmelCase_ = rc.atom_order[atom_name]
UpperCAmelCase_ = 1
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
return protein
def A ( __UpperCAmelCase ) -> Dict[str, np.ndarray]:
'''simple docstring'''
UpperCAmelCase_ = tree_map(lambda __UpperCAmelCase : torch.tensor(__UpperCAmelCase , device=batch['''aatype'''].device ) , __UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = tensor_tree_map(lambda __UpperCAmelCase : np.array(__UpperCAmelCase ) , make_atomaa_masks(__UpperCAmelCase ) )
return out
| 344 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
UpperCamelCase_ = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCamelCase_ = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
UpperCamelCase_ = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # transpose references: sacrebleu expects one list per reference set, not per prediction
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
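# Added usage sketch (hedged): the metric above is a thin wrapper, so the same
# score can be obtained from sacrebleu's CHRF directly (kwargs as in the docstring):
#
#   chrf_pp = CHRF(char_order=6, word_order=2, beta=2)   # word_order=2 -> chrF++
#   hyps = ["The cat sat on the mat."]
#   refs = [["The cat is sitting on the mat."]]          # one reference stream
#   print(chrf_pp.corpus_score(hyps, refs).score)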
| 355 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
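# Added usage sketch (hedged; downloads the checkpoint on first use): PipelineTool
# instances are callable, so a 1-D float waveform goes in and a transcript comes out:
#
#   import numpy as np
#   tool = SpeechToTextTool()
#   waveform = np.zeros(16_000, dtype=np.float32)   # one second of silence
#   print(tool(waveform))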
| 344 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **_lowercase):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**_lowercase)
        return config
    def check_over_configs(self, time_step=0, **_lowercase):
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
    def check_over_forward(self, time_step=0, **_lowercase):
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, **_lowercase):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**_lowercase)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        # PNDM denoises with Runge-Kutta (PRK) warm-up steps, then linear multistep (PLMS) steps
        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample
        return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
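# Added standalone sketch (hedged): the same two-phase recipe outside the test
# harness, using only APIs exercised above (a zero residual stands in for a UNet):
#
#   import torch
#   from diffusers import PNDMScheduler
#   sched = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   sched.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in sched.prk_timesteps:
#       sample = sched.step_prk(torch.zeros_like(sample), t, sample).prev_sample
#   for t in sched.plms_timesteps:
#       sample = sched.step_plms(torch.zeros_like(sample), t, sample).prev_sample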
| 356 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
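# Added usage note (hedged, illustrative): with the lazy module in place, each
# symbol listed in the import structure resolves on first attribute access:
#
#   from transformers import OPTConfig, OPTForCausalLM
#   config = OPTConfig(vocab_size=100, hidden_size=64, ffn_dim=256,
#                      num_hidden_layers=2, num_attention_heads=2,
#                      word_embed_proj_dim=64)   # tiny config for illustration
#   model = OPTForCausalLM(config)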
| 344 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32,
        attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0,
        eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # both sizes fall back to hidden_size-derived defaults when left as None
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
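# Added illustration (hedged): exercising the fallback defaults from __init__ above --
#
#   cfg = RwkvConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2)
#   assert cfg.attention_hidden_size == 64    # defaults to hidden_size
#   assert cfg.intermediate_size == 256       # defaults to 4 * hidden_size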
| 357 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 344 | 0 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute base**exponent % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: the last `digits` digits of the hyperexponentiation base^^height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f"{solution() = }")
| 358 |
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decrypt ciphertext with a cycled key; return None on any invalid character."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Brute-force every 3-letter lowercase key, keeping decryptions with valid characters."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        decoded = try_key(ciphertext, key)
        if decoded is not None:
            possibles.append(decoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only candidate plaintexts containing the given common English word."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: sum of the ASCII values of the decrypted text."""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
| 344 | 0 |
def is_even(number: int) -> bool:
    """Return True if `number` is even, by testing its lowest bit.

    >>> [is_even(n) for n in (2, 3)]
    [True, False]
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 359 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
DATASET_LOADING_SCRIPT_CODE = UpperCamelCase_  # give the script source above the name the fixtures return


@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    """Write the dummy loading script to tmp_path/datasets/<name>/<name>.py and return the directory."""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
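# Added usage sketch (hedged assumption): a test can request the directory fixture
# and point datasets.load_dataset at the generated script folder:
#
#   def test_dummy_dataset(dataset_loading_script_dir):
#       import datasets
#       ds = datasets.load_dataset(dataset_loading_script_dir, split="train")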
| 344 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2,
        drop_path_rate=0.0, dropout_rate=0.0, **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
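# Added instantiation sketch (hedged, illustrative values): stage i of the 4-stage
# backbone is described by patch_sizes[i], strides[i], hidden_sizes[i], depths[i] --
#
#   cfg = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])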
| 360 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32,
        num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02,
        rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True,
        rope_scaling=None, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # also accept the misspelled legacy kwarg key if a caller passes it
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: a dict {"type": "linear" | "dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 344 | 0 |
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock_timeout(tmpdir):
    """A second lock on the same file must time out while the first is held."""
    lock_a = FileLock(str(tmpdir / "foo.lock"))
    lock_b = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_b.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename_is_shortened(tmpdir):
    """Over-long lock filenames are shortened below the 255-character limit."""
    filename = "a" * 1000 + ".lock"
    lock_a = FileLock(str(tmpdir / filename))
    assert lock_a._lock_file.endswith(".lock")
    assert not lock_a._lock_file.endswith(filename)
    assert len(os.path.basename(lock_a._lock_file)) <= 255
    lock_b = FileLock(tmpdir / filename)
    with lock_a.acquire():
        with pytest.raises(Timeout):
            lock_b.acquire(0)
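# Added usage sketch (hedged): outside the tests, the lock is a context manager
# serializing a critical section across processes:
#
#   with FileLock("/tmp/shared_resource.lock"):
#       ...  # only one process at a time executes this block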
| 361 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **_lowercase):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**_lowercase)
        return config
    def check_over_configs(self, time_step=0, **_lowercase):
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
    def check_over_forward(self, time_step=0, **_lowercase):
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **_lowercase):
        # build a default single-step scheduler only when none is supplied
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**_lowercase)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
        assert sample.dtype == torch.float16
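# Added note (hedged): as the config-switching test above relies on, these solver
# schedulers share a config schema, so instances interconvert via from_config:
#
#   base = DPMSolverSinglestepScheduler()
#   multi = DPMSolverMultistepScheduler.from_config(base.config)
#   uni = UniPCMultistepScheduler.from_config(base.config)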
| 344 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def __a ( self :Dict) -> Tuple:
import torch
UpperCAmelCase_ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''')
UpperCAmelCase_ = tokenizer(**self.metas)['''input_ids''']
# fmt: off
UpperCAmelCase_ = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 1069, 11]]),
torch.tensor([[0, 0, 0, 1069, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
@require_torch
def __a ( self :List[Any]) -> str:
import torch
UpperCAmelCase_ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''')
UpperCAmelCase_ = tokenizer(**self.metas)['''input_ids''']
# fmt: off
UpperCAmelCase_ = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
| 362 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
        # We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
        # Buffers are not included in the offload by default, so they stay on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
        # Buffers are not included in the offload by default, so they stay on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule to a different device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
        # Buffers are not included in the offload by default, so they stay on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
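# Minimal usage sketch of the hook mechanism exercised above, relying only on the
# public `accelerate.hooks` API already imported at the top of this file.
# `ScaleOutputHook` is an illustrative name, not part of the library.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class ScaleOutputHook(ModelHook):
    def post_forward(self, module, output):
        # Runs after the wrapped forward; must return the (possibly modified) output.
        return output * 2

layer = nn.Linear(3, 4)
reference = layer(torch.ones(2, 3))
add_hook_to_module(layer, ScaleOutputHook())
hooked = layer(torch.ones(2, 3))
assert torch.allclose(hooked, reference * 2)
remove_hook_from_module(layer)  # restores the original forward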
| 344 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class a_ ( _snake_case ):
UpperCamelCase__ : str ="megatron-bert"
def __init__( self :List[Any] , _lowercase :List[str]=29056 , _lowercase :List[str]=1024 , _lowercase :List[str]=24 , _lowercase :Tuple=16 , _lowercase :Optional[int]=4096 , _lowercase :str="gelu" , _lowercase :Optional[Any]=0.1 , _lowercase :List[Any]=0.1 , _lowercase :Tuple=512 , _lowercase :str=2 , _lowercase :str=0.02 , _lowercase :Union[str, Any]=1E-1_2 , _lowercase :List[Any]=0 , _lowercase :Tuple="absolute" , _lowercase :Union[str, Any]=True , **_lowercase :str , ) -> Any:
super().__init__(pad_token_id=_lowercase , **_lowercase)
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
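# Quick instantiation sketch: the class above ships as MegatronBertConfig, and a
# randomly initialized model can be built straight from it (the values below are
# illustrative, chosen small for speed).
from transformers import MegatronBertConfig, MegatronBertModel

tiny_config = MegatronBertConfig(
    num_hidden_layers=2, hidden_size=64, num_attention_heads=4, intermediate_size=128
)
tiny_model = MegatronBertModel(tiny_config)  # config-driven, randomly initialized
print(tiny_model.config.hidden_size)  # 64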
| 363 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
UpperCAmelCase_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
UpperCAmelCase_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase)
processor.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , **_lowercase :Dict) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).tokenizer
def __a ( self :Optional[Any] , **_lowercase :Optional[Any]) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).image_processor
def __a ( self :Dict , **_lowercase :Tuple) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).qformer_tokenizer
def __a ( self :Optional[int]) -> str:
shutil.rmtree(self.tmpdirname)
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
self.assertIsInstance(processor.qformer_tokenizer , _lowercase)
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase)
UpperCAmelCase_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
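# Hedged sketch of the round-trip these tests verify; the checkpoint name is an
# assumption (any InstructBLIP checkpoint with the same processor layout works).
import numpy as np
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="describe the image", images=image, return_tensors="pt")
# Both tokenizers contribute: plain keys, qformer-prefixed keys, plus pixel values.
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']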
| 344 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = r"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class a_ ( _snake_case ):
@add_start_docstrings(_lowercase)
def __call__( self :Union[str, Any] , _lowercase :torch.LongTensor , _lowercase :torch.FloatTensor , **_lowercase :List[str]) -> bool:
raise NotImplementedError('''StoppingCriteria needs to be subclassed''')
class a_ ( _snake_case ):
def __init__( self :Optional[int] , _lowercase :int , _lowercase :Optional[int] = None) -> List[Any]:
UpperCAmelCase_ = max_length
UpperCAmelCase_ = max_position_embeddings
@add_start_docstrings(_lowercase)
def __call__( self :Dict , _lowercase :torch.LongTensor , _lowercase :torch.FloatTensor , **_lowercase :Any) -> bool:
UpperCAmelCase_ = input_ids.shape[-1]
UpperCAmelCase_ = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
'''exceptions, performance degradation, or nothing at all.''')
return is_done
class a_ ( _snake_case ):
def __init__( self :Optional[int] , _lowercase :int , _lowercase :int) -> str:
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
'''with `max_length = start_length + max_new_tokens` instead.''' , _lowercase , )
UpperCAmelCase_ = start_length
UpperCAmelCase_ = max_new_tokens
UpperCAmelCase_ = start_length + max_new_tokens
@add_start_docstrings(_lowercase)
def __call__( self :Any , _lowercase :torch.LongTensor , _lowercase :torch.FloatTensor , **_lowercase :Union[str, Any]) -> bool:
return input_ids.shape[-1] >= self.max_length
class a_ ( _snake_case ):
def __init__( self :Any , _lowercase :float , _lowercase :Optional[float] = None) -> Union[str, Any]:
UpperCAmelCase_ = max_time
UpperCAmelCase_ = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(_lowercase)
def __call__( self :List[str] , _lowercase :torch.LongTensor , _lowercase :torch.FloatTensor , **_lowercase :Union[str, Any]) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class a_ ( _snake_case ):
@add_start_docstrings(_lowercase)
def __call__( self :int , _lowercase :torch.LongTensor , _lowercase :torch.FloatTensor , **_lowercase :Optional[Any]) -> bool:
return any(criteria(_lowercase , _lowercase) for criteria in self)
@property
def __a ( self :Any) -> Optional[int]:
for stopping_criterium in self:
if isinstance(_lowercase , _lowercase):
return stopping_criterium.max_length
elif isinstance(_lowercase , _lowercase):
return stopping_criterium.max_length
return None
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> StoppingCriteriaList:
'''simple docstring'''
UpperCAmelCase_ = stopping_criteria.max_length
UpperCAmelCase_ = deepcopy(__UpperCAmelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , __UpperCAmelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=__UpperCAmelCase ) )
return new_stopping_criteria
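# Composition sketch using the published transformers names for the classes
# defined above (StoppingCriteriaList ORs its members, as its __call__ shows).
import torch
from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList(
    [
        MaxLengthCriteria(max_length=20),  # stop once the sequence holds 20 tokens
        MaxTimeCriteria(max_time=5.0),     # or after 5 seconds of generation
    ]
)
input_ids = torch.ones((1, 20), dtype=torch.long)
scores = torch.zeros((1, 100))
assert criteria(input_ids, scores)  # True: max_length is already reached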
| 364 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] ="levit"
def __init__( self :List[str] , _lowercase :List[Any]=224 , _lowercase :str=3 , _lowercase :Optional[int]=3 , _lowercase :str=2 , _lowercase :List[Any]=1 , _lowercase :str=16 , _lowercase :Dict=[128, 256, 384] , _lowercase :Union[str, Any]=[4, 8, 12] , _lowercase :Tuple=[4, 4, 4] , _lowercase :Dict=[16, 16, 16] , _lowercase :Any=0 , _lowercase :Dict=[2, 2, 2] , _lowercase :Any=[2, 2, 2] , _lowercase :Tuple=0.02 , **_lowercase :Union[str, Any] , ) -> Optional[Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] =version.parse("1.11" )
@property
def __a ( self :Any) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def __a ( self :List[Any]) -> float:
return 1E-4
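# Sketch: the two classes above ship as LevitConfig and LevitOnnxConfig; the
# ONNX config exposes the input axes and validation tolerance defined above.
from transformers import LevitConfig
from transformers.models.levit.configuration_levit import LevitOnnxConfig

levit_config = LevitConfig()
levit_onnx_config = LevitOnnxConfig(levit_config)
print(levit_onnx_config.inputs)               # OrderedDict: pixel_values -> batch/num_channels/height/width
print(levit_onnx_config.atol_for_validation)  # 1e-04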
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["ViTFeatureExtractor"]
UpperCamelCase_ = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
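# Standalone sketch (not part of the module) of the optional-dependency gating
# used above: names are only registered when the backend is importable.
from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    print("torch missing: the torch-backed ViT classes stay out of the export table")
else:
    print("torch present: ViTModel & co. resolve lazily on first attribute access")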
| 365 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=None , __UpperCAmelCase="no" , __UpperCAmelCase="29500" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCAmelCase_ = True
elif "IPython" in sys.modules:
UpperCAmelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCAmelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
UpperCAmelCase_ = 8
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''TPU''' )
print(f"Launching a training on {num_processes} TPU cores." )
xmp.spawn(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*__UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
            # torch.distributed will expect a few environment variables to be here. We set the ones
            # common to each process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port=__UpperCAmelCase , mixed_precision=__UpperCAmelCase ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''MULTI_GPU''' )
print(f"Launching training on {num_processes} GPUs." )
try:
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCAmelCase_ = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*__UpperCAmelCase )
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=2 ) -> Optional[Any]:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones
        # common to each process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , debug=__UpperCAmelCase )
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
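# Typical invocation sketch: the first function above ships as
# `accelerate.notebook_launcher`; the training function below is illustrative.
from accelerate import notebook_launcher

def training_function():
    # Each spawned process runs this; an Accelerator may only be created inside it.
    print("training step")

notebook_launcher(training_function, args=(), num_processes=2)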
| 344 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class a_ ( _snake_case ):
def __init__( self :int , _lowercase :int , _lowercase :Dict=13 , _lowercase :List[Any]=7 , _lowercase :Optional[int]=True , _lowercase :Tuple=True , _lowercase :List[Any]=False , _lowercase :Any=True , _lowercase :Dict=99 , _lowercase :List[str]=32 , _lowercase :Tuple=5 , _lowercase :Optional[Any]=4 , _lowercase :List[str]=37 , _lowercase :Optional[int]="gelu" , _lowercase :List[str]=0.1 , _lowercase :Dict=0.1 , _lowercase :Optional[int]=512 , _lowercase :Tuple=16 , _lowercase :Any=2 , _lowercase :List[str]=0.02 , _lowercase :int=3 , _lowercase :Union[str, Any]=4 , _lowercase :Tuple=None , ) -> Dict:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def __a ( self :str) -> Union[str, Any]:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self :Any) -> Dict:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __a ( self :Any , _lowercase :Union[str, Any] , _lowercase :Optional[int] , _lowercase :int , _lowercase :List[Any] , _lowercase :Tuple , _lowercase :Tuple) -> Optional[Any]:
UpperCAmelCase_ = DistilBertModel(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __a ( self :List[str] , _lowercase :Dict , _lowercase :List[str] , _lowercase :Dict , _lowercase :Union[str, Any] , _lowercase :Any , _lowercase :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = DistilBertForMaskedLM(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase , attention_mask=_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __a ( self :Optional[int] , _lowercase :Optional[Any] , _lowercase :Any , _lowercase :List[Any] , _lowercase :Optional[int] , _lowercase :List[str] , _lowercase :Optional[Any]) -> Dict:
UpperCAmelCase_ = DistilBertForQuestionAnswering(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(
_lowercase , attention_mask=_lowercase , start_positions=_lowercase , end_positions=_lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __a ( self :Any , _lowercase :Optional[int] , _lowercase :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any] , _lowercase :List[Any] , _lowercase :Union[str, Any]) -> str:
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = DistilBertForSequenceClassification(_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase , attention_mask=_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __a ( self :Dict , _lowercase :List[str] , _lowercase :List[Any] , _lowercase :Optional[Any] , _lowercase :int , _lowercase :List[str] , _lowercase :Union[str, Any]) -> Tuple:
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = DistilBertForTokenClassification(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase , attention_mask=_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __a ( self :Optional[int] , _lowercase :Any , _lowercase :Dict , _lowercase :Dict , _lowercase :int , _lowercase :Dict , _lowercase :Optional[int]) -> Any:
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = DistilBertForMultipleChoice(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = model(
_lowercase , attention_mask=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __a ( self :List[str]) -> Optional[Any]:
UpperCAmelCase_ = self.prepare_config_and_inputs()
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = config_and_inputs
UpperCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a_ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase__ : List[Any] =(
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : Optional[int] =True
UpperCamelCase__ : Optional[int] =True
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : Dict =True
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = DistilBertModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_lowercase , dim=37)
def __a ( self :Optional[Any]) -> List[Any]:
self.config_tester.run_common_tests()
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_lowercase)
def __a ( self :Tuple) -> List[str]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowercase)
def __a ( self :Optional[int]) -> int:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_lowercase)
def __a ( self :Any) -> int:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowercase)
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_lowercase)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowercase)
@slow
def __a ( self :Tuple) -> Dict:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = DistilBertModel.from_pretrained(_lowercase)
self.assertIsNotNone(_lowercase)
@slow
@require_torch_gpu
def __a ( self :List[Any]) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(config=_lowercase)
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase)
UpperCAmelCase_ = torch.jit.trace(
_lowercase , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowercase , os.path.join(_lowercase , '''traced_model.pt'''))
UpperCAmelCase_ = torch.jit.load(os.path.join(_lowercase , '''traced_model.pt''') , map_location=_lowercase)
loaded(inputs_dict['''input_ids'''].to(_lowercase) , inputs_dict['''attention_mask'''].to(_lowercase))
@require_torch
class a_ ( unittest.TestCase ):
@slow
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = DistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCAmelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
UpperCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
UpperCAmelCase_ = model(_lowercase , attention_mask=_lowercase)[0]
UpperCAmelCase_ = torch.Size((1, 11, 768))
self.assertEqual(output.shape , _lowercase)
UpperCAmelCase_ = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4))
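# Hedged inference sketch mirroring the integration test above (same checkpoint
# and inputs, so the hidden-state shape is known).
import torch
from transformers import DistilBertModel

model = DistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    hidden = model(input_ids, attention_mask=attention_mask)[0]
print(hidden.shape)  # torch.Size([1, 11, 768])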
| 366 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( _snake_case ):
def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any) -> int:
self.run_seqaseq_quick(distributed=_lowercase)
@require_torch_multi_gpu
def __a ( self :int) -> Any:
self.run_seqaseq_quick(distributed=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
        # program, and it breaks other tests that run from the same pytest worker; therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu we will need to make a special test
        #
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def __a ( self :str , _lowercase :Any) -> List[str]:
        # as each sub-test is slow-ish, we split into multiple sub-tests to avoid a CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''])
UpperCAmelCase_ = len(re.findall(_lowercase , cl.err))
self.assertEqual(_lowercase , data['''n_matches'''])
@slow
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(_lowercase)
UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :List[str]) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowercase :str) -> Tuple[int, float]:
UpperCAmelCase_ = '''--skip_memory_metrics 0'''
UpperCAmelCase_ = self.run_trainer(
max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
UpperCAmelCase_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding`, which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, let's check
        # that we have at least 120MB in savings
UpperCAmelCase_ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
_lowercase , _lowercase , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def __a ( self :Any , _lowercase :int , _lowercase :str , _lowercase :int , _lowercase :float = 3E-3 , _lowercase :str = "adafactor" , _lowercase :bool = False , _lowercase :str = None , _lowercase :int = 0 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :int = None , ) -> List[Any]:
UpperCAmelCase_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowercase)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowercase)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
UpperCAmelCase_ = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowercase)}\n ".split()
UpperCAmelCase_ = '''
--do_predict
'''.split()
UpperCAmelCase_ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase_ = get_gpu_count()
UpperCAmelCase_ = get_torch_dist_unique_port()
UpperCAmelCase_ = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
UpperCAmelCase_ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowercase , env=self.get_env())
else:
UpperCAmelCase_ = ['''run_translation.py'''] + args
with patch.object(_lowercase , '''argv''' , _lowercase):
main()
return output_dir
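# Sketch of the in-process invocation pattern the harness uses for the
# non-distributed path. Assumes run_translation.py is importable (the harness
# extends sys.path for that); flag values and the fixture path are illustrative.
import sys
from unittest.mock import patch

testargs = [
    "run_translation.py",
    "--model_name_or_path", "sshleifer/tiny-mbart",
    "--train_file", "tests/fixtures/tests_samples/wmt_en_ro/train.json",
    "--validation_file", "tests/fixtures/tests_samples/wmt_en_ro/val.json",
    "--output_dir", "/tmp/seq2seq_smoke",
    "--overwrite_output_dir",
    "--do_train",
    "--max_train_samples", "8",
    "--num_train_epochs", "1",
    "--source_lang", "en_XX",
    "--target_lang", "ro_RO",
]
with patch.object(sys, "argv", testargs):
    main()  # the run_translation entry point imported at the top of this file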
| 344 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
class a_ ( _snake_case , _snake_case ):
UpperCamelCase__ : Union[str, Any] ="maskformer-swin"
UpperCamelCase__ : List[str] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :Union[str, Any] , _lowercase :Optional[int]=224 , _lowercase :List[str]=4 , _lowercase :Tuple=3 , _lowercase :List[Any]=96 , _lowercase :Any=[2, 2, 6, 2] , _lowercase :int=[3, 6, 12, 24] , _lowercase :List[Any]=7 , _lowercase :Dict=4.0 , _lowercase :Any=True , _lowercase :int=0.0 , _lowercase :List[Any]=0.0 , _lowercase :Tuple=0.1 , _lowercase :str="gelu" , _lowercase :Union[str, Any]=False , _lowercase :Tuple=0.02 , _lowercase :List[str]=1E-5 , _lowercase :List[str]=None , _lowercase :Any=None , **_lowercase :Any , ) -> Union[str, Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(_lowercase) - 1))
UpperCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_lowercase) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names)
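        # e.g. with the defaults above (embed_dim=96 and 4 stages):
        # hidden_size = 96 * 2 ** 3 = 768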
| 367 |
import functools
def A ( worda , wordb ) -> int:
    '''simple docstring'''
    len_worda = len(worda )
    len_wordb = len(wordb )
    @functools.cache
    def min_distance(indexa , indexb ) -> int:
        # if the first word's index overflows - delete all remaining letters of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete all remaining letters of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] ) # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
    return min_distance(0 , 0 )
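# Illustrative sanity checks (example strings are not from the original module):
# "kitten" -> "sitting" needs 3 edits (substitute k->s, substitute e->i, insert g).
assert A('''kitten''' , '''sitting''') == 3
assert A('''''' , '''abc''') == 3 # only insertions remain once one word is empty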
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 0 |
def bubble_sort( list_data , length = 0 ) -> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i] , list_data[i + 1] = list_data[i + 1] , list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
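# Illustrative sanity checks (input lists are not from the original module):
assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]
assert bubble_sort([]) == []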
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
UpperCamelCase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Any ="left"
def __init__( self :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=False , _lowercase :Tuple="<s>" , _lowercase :Any="</s>" , _lowercase :Dict="<unk>" , _lowercase :str="<sep>" , _lowercase :Tuple="<pad>" , _lowercase :Any="<cls>" , _lowercase :List[str]="<mask>" , _lowercase :Union[str, Any]=["<eop>", "<eod>"] , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Union[str, Any] , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
UpperCAmelCase_ = 3
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
@property
def __a ( self :int) -> List[Any]:
return len(self.sp_model)
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Optional[Any]) -> List[Any]:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __a ( self :List[str] , _lowercase :Tuple) -> Optional[int]:
if self.remove_space:
UpperCAmelCase_ = ''' '''.join(inputs.strip().split())
else:
UpperCAmelCase_ = inputs
UpperCAmelCase_ = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCAmelCase_ = unicodedata.normalize('''NFKD''' , _lowercase)
UpperCAmelCase_ = ''''''.join([c for c in outputs if not unicodedata.combining(_lowercase)])
if self.do_lower_case:
UpperCAmelCase_ = outputs.lower()
return outputs
def __a ( self :str , _lowercase :str) -> List[str]:
UpperCAmelCase_ = self.preprocess_text(_lowercase)
UpperCAmelCase_ = self.sp_model.encode(_lowercase , out_type=_lowercase)
UpperCAmelCase_ = []
for piece in pieces:
if len(_lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCAmelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCAmelCase_ = cur_pieces[1:]
else:
UpperCAmelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowercase)
else:
new_pieces.append(_lowercase)
return new_pieces
def __a ( self :Optional[Any] , _lowercase :Union[str, Any]) -> Tuple:
return self.sp_model.PieceToId(_lowercase)
def __a ( self :Optional[int] , _lowercase :Optional[Any]) -> List[str]:
return self.sp_model.IdToPiece(_lowercase)
def __a ( self :List[Any] , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = ''''''.join(_lowercase).replace(_lowercase , ''' ''').strip()
return out_string
def __a ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :bool = False , _lowercase :bool = None , _lowercase :bool = True , **_lowercase :Tuple , ) -> str:
UpperCAmelCase_ = kwargs.pop('''use_source_tokenizer''' , _lowercase)
UpperCAmelCase_ = self.convert_ids_to_tokens(_lowercase , skip_special_tokens=_lowercase)
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
UpperCAmelCase_ = []
sub_texts.append(_lowercase)
else:
current_sub_text.append(_lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCAmelCase_ = ''''''.join(_lowercase)
UpperCAmelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ = self.clean_up_tokenization(_lowercase)
return clean_text
else:
return text
def __a ( self :str , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self :Dict , _lowercase :List[int] , _lowercase :Optional[List[int]] = None , _lowercase :bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase)
if token_ids_a is not None:
return ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) + [1, 1]
return ([0] * len(_lowercase)) + [1, 1]
def __a ( self :Optional[int] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def __a ( self :str , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_lowercase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
UpperCAmelCase_ = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
| 344 | 0 |
import string
from math import log10
# function and parameter names below are reconstructed from the bodies
# (term frequency / document frequency / inverse document frequency / tf-idf)
def term_frequency( term , document ) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
    tokenize_document = document_without_punctuation.split(''' ''' ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency( term , corpus ) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('''\n''' )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency( df , n , smoothing=False ) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError('''log10(0) is undefined.''' )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError('''df must be > 0''' )
    elif n == 0:
        raise ValueError('''log10(0) is undefined.''' )
    return round(log10(n / df ) , 3 )
def tf_idf( tf , idf ) -> float:
    '''simple docstring'''
    return round(tf * idf , 3 )
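# Worked example; the two-document corpus below is illustrative, not from the
# original module:
example_corpus = '''This is the first document.\nThis is the second one.'''
example_tf = term_frequency('''first''' , '''This is the first document.''') # -> 1
example_df , example_n = document_frequency('''first''' , example_corpus) # -> (1, 2)
print(tf_idf(example_tf , inverse_document_frequency(example_df , example_n))) # -> 0.301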
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case , _snake_case ):
UpperCamelCase__ : Union[str, Any] ="maskformer-swin"
UpperCamelCase__ : List[str] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :Union[str, Any] , _lowercase :Optional[int]=224 , _lowercase :List[str]=4 , _lowercase :Tuple=3 , _lowercase :List[Any]=96 , _lowercase :Any=[2, 2, 6, 2] , _lowercase :int=[3, 6, 12, 24] , _lowercase :List[Any]=7 , _lowercase :Dict=4.0 , _lowercase :Any=True , _lowercase :int=0.0 , _lowercase :List[Any]=0.0 , _lowercase :Tuple=0.1 , _lowercase :str="gelu" , _lowercase :Union[str, Any]=False , _lowercase :Tuple=0.02 , _lowercase :List[str]=1E-5 , _lowercase :List[str]=None , _lowercase :Any=None , **_lowercase :Any , ) -> Union[str, Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(_lowercase) - 1))
UpperCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_lowercase) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names)
| 344 | 0 |
from math import sqrt
def solution( limit = 100_0000 ) -> int:
    '''simple docstring'''
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                # count the cuboids with longest side max_cuboid_size whose two
                # shorter sides sum to sum_shortest_sides
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
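# Checkpoint from the Project Euler 86 statement: the cuboid count first
# exceeds two thousand at M = 100 (there are 1975 solutions at M = 99).
print(f"{solution(2_000) = }") # -> solution(2_000) = 100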
if __name__ == "__main__":
print(f"{solution() = }")
| 370 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase_ = False
UpperCamelCase_ = False
def A ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
return TrainCommand(__UpperCAmelCase )
class a_ ( _snake_case ):
@staticmethod
def __a ( _lowercase :ArgumentParser) -> List[Any]:
UpperCAmelCase_ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
train_parser.add_argument(
'''--train_data''' , type=_lowercase , required=_lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''')
train_parser.add_argument(
'''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''')
train_parser.add_argument(
'''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''')
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''')
train_parser.add_argument(
'''--validation_split''' , type=_lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path to saved the trained model.''')
train_parser.add_argument(
'''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''')
train_parser.add_argument(
'''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=32 , help='''Batch size for training.''')
train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=64 , help='''Batch size for validation.''')
train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3E-5 , help='''Learning rate.''')
train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1E-0_8 , help='''Epsilon for Adam optimizer.''')
train_parser.set_defaults(func=_lowercase)
def __init__( self :Union[str, Any] , _lowercase :Namespace) -> Union[str, Any]:
UpperCAmelCase_ = logging.get_logger('''transformers-cli/training''')
UpperCAmelCase_ = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_lowercase)
UpperCAmelCase_ = args.output
UpperCAmelCase_ = args.column_label
UpperCAmelCase_ = args.column_text
UpperCAmelCase_ = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
UpperCAmelCase_ = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = args.validation_split
UpperCAmelCase_ = args.train_batch_size
UpperCAmelCase_ = args.valid_batch_size
UpperCAmelCase_ = args.learning_rate
UpperCAmelCase_ = args.adam_epsilon
def __a ( self :int) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :Optional[Any]) -> Any:
raise NotImplementedError
def __a ( self :int) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
| 344 | 0 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a_ ( _snake_case , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : List[Any] =BioGptTokenizer
UpperCamelCase__ : str =False
def __a ( self :int) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
UpperCAmelCase_ = dict(zip(_lowercase , range(len(_lowercase))))
UpperCAmelCase_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''') as fp:
fp.write(json.dumps(_lowercase))
with open(self.merges_file , '''w''') as fp:
fp.write('''\n'''.join(_lowercase))
def __a ( self :Optional[int] , _lowercase :Union[str, Any]) -> Optional[Any]:
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = '''lower newer'''
return input_text, output_text
def __a ( self :Optional[Any]) -> Union[str, Any]:
UpperCAmelCase_ = BioGptTokenizer(self.vocab_file , self.merges_file)
UpperCAmelCase_ = '''lower'''
UpperCAmelCase_ = ['''low''', '''er</w>''']
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokens + ['''<unk>''']
UpperCAmelCase_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase) , _lowercase)
@slow
def __a ( self :Optional[Any]) -> Union[str, Any]:
UpperCAmelCase_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowercase)
UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowercase)
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_lowercase)
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase)
self.assertTrue(encoded_sentence == [2] + text)
self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 371 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def __a ( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self :List[Any] , _lowercase :Tuple , _lowercase :List[str]=False) -> int:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(_lowercase , _lowercase)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(_lowercase , _lowercase) > max_size:
UpperCAmelCase_ = max_size / max(_lowercase , _lowercase)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[0])[0]
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 0 |
import math
def decimal_to_octal( num ) -> str:
    '''simple docstring'''
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # place the next octal digit at the 10**counter position
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 ) # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal )}"
def main( ) -> None:
'''simple docstring'''
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
| 350 |
def solution( limit = 100_0000 ) -> int:
    '''simple docstring'''
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1: # i is prime: no smaller prime has reduced phi[i]
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
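# Checkpoint from the Project Euler 72 statement: for d <= 8 there are exactly
# 21 reduced proper fractions, so solution(8) should be 21.
assert solution(8) == 21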
if __name__ == "__main__":
print(solution())
| 344 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case ):
def __init__( self :Union[str, Any] , *_lowercase :Any , **_lowercase :List[Any]) -> None:
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' , _lowercase , )
super().__init__(*_lowercase , **_lowercase)
| 351 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =(PNDMScheduler,)
UpperCamelCase__ : Optional[Any] =(("num_inference_steps", 50),)
def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :int , **_lowercase :str) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
        # an earlier version of set_timesteps() raised an error when indexing alphas with num_inference_steps as a power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
| 344 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
def __init__( self :Any , _lowercase :Union[str, Any] , _lowercase :List[str]=13 , _lowercase :Tuple=30 , _lowercase :Optional[Any]=2 , _lowercase :List[Any]=3 , _lowercase :int=True , _lowercase :Optional[int]=True , _lowercase :Any=32 , _lowercase :Optional[Any]=5 , _lowercase :List[Any]=4 , _lowercase :Any=37 , _lowercase :List[str]="gelu" , _lowercase :List[str]=0.1 , _lowercase :Any=0.1 , _lowercase :List[str]=10 , _lowercase :Optional[Any]=0.02 , _lowercase :List[Any]=3 , _lowercase :List[Any]=None , _lowercase :Optional[Any]=2 , ) -> str:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 2
def __a ( self :str) -> Tuple:
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def __a ( self :Any) -> Union[str, Any]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __a ( self :List[str] , _lowercase :List[str] , _lowercase :List[str] , _lowercase :Tuple) -> Optional[int]:
UpperCAmelCase_ = DeiTModel(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __a ( self :Tuple , _lowercase :str , _lowercase :List[Any] , _lowercase :Optional[Any]) -> str:
UpperCAmelCase_ = DeiTForMaskedImageModeling(config=_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = DeiTForMaskedImageModeling(_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCAmelCase_ = model(_lowercase)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def __a ( self :Any , _lowercase :List[Any] , _lowercase :int , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = DeiTForImageClassification(_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = model(_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = DeiTForImageClassification(_lowercase)
model.to(_lowercase)
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCAmelCase_ = model(_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __a ( self :Any) -> int:
UpperCAmelCase_ = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] =(
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCamelCase__ : List[Any] =(
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCamelCase__ : Tuple =False
UpperCamelCase__ : Union[str, Any] =False
UpperCamelCase__ : List[str] =False
def __a ( self :Union[str, Any]) -> Tuple:
UpperCAmelCase_ = DeiTModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37)
def __a ( self :Union[str, Any]) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''')
def __a ( self :List[str]) -> Dict:
pass
def __a ( self :int) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_lowercase)
UpperCAmelCase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase)
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase)
def __a ( self :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase)
def __a ( self :List[Any]) -> List[Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase)
def __a ( self :Dict , _lowercase :List[Any] , _lowercase :int , _lowercase :Union[str, Any]=False) -> List[str]:
UpperCAmelCase_ = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __a ( self :int) -> List[str]:
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowercase)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ = model_class(_lowercase)
model.to(_lowercase)
model.train()
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
UpperCAmelCase_ = model(**_lowercase).loss
loss.backward()
def __a ( self :Dict) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ = False
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowercase) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase_ = model_class(_lowercase)
model.gradient_checkpointing_enable()
model.to(_lowercase)
model.train()
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
UpperCAmelCase_ = model(**_lowercase).loss
loss.backward()
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowercase),
*get_values(_lowercase),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
UpperCAmelCase_ = problem_type['''title''']
UpperCAmelCase_ = problem_type['''num_labels''']
UpperCAmelCase_ = model_class(_lowercase)
model.to(_lowercase)
model.train()
UpperCAmelCase_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase)
if problem_type["num_labels"] > 1:
UpperCAmelCase_ = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels'''])
UpperCAmelCase_ = inputs['''labels'''].to(problem_type['''dtype'''])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowercase) as warning_list:
UpperCAmelCase_ = model(**_lowercase).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}")
loss.backward()
@slow
def __a ( self :Dict) -> Any:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = DeiTModel.from_pretrained(_lowercase)
self.assertIsNotNone(_lowercase)
def A ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def __a ( self :Tuple) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''')
if is_vision_available()
else None
)
@slow
def __a ( self :Optional[int]) -> int:
UpperCAmelCase_ = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''').to(
_lowercase)
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_lowercase , return_tensors='''pt''').to(_lowercase)
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_lowercase)
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , _lowercase)
UpperCAmelCase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861]).to(_lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''')
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_lowercase , return_tensors='''pt''')
UpperCAmelCase_ = inputs.pixel_values.to(_lowercase)
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ = model(_lowercase)
| 352 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class a_ :
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="dict"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Audio" , init=_snake_case , repr=_snake_case )
def __call__( self :List[Any]) -> List[Any]:
return self.pa_type
def __a ( self :Any , _lowercase :Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(_lowercase , _lowercase):
return {"bytes": None, "path": value}
elif isinstance(_lowercase , _lowercase):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(_lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
                    # To convert raw PCM bytes to WAV bytes, the sampling rate has to be known
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
                    # If we already have the PCM bytes, we don't need to read the file again (just use them!)
UpperCAmelCase_ = np.frombuffer(value['''bytes'''] , dtype=np.intaa).astype(np.floataa) / 32767
else:
UpperCAmelCase_ = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.floataa) / 32767
UpperCAmelCase_ = BytesIO(bytes())
sf.write(_lowercase , _lowercase , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self :Dict , _lowercase :dict , _lowercase :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
UpperCAmelCase_ , UpperCAmelCase_ = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
UpperCAmelCase_ = xsplitext(_lowercase)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split('''::''')[-1]
try:
UpperCAmelCase_ = string_to_dict(_lowercase , config.HUB_DATASETS_URL)['''repo_id''']
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(_lowercase , '''rb''' , use_auth_token=_lowercase) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(_lowercase)
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(_lowercase , orig_sr=_lowercase , target_sr=self.sampling_rate)
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def __a ( self :int , _lowercase :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
UpperCAmelCase_ = pa.array([Audio().encode_example(_lowercase) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(_lowercase , self.pa_type)
def __a ( self :Any , _lowercase :pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(_lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(_lowercase , self.pa_type)
| 344 | 0 |
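# A minimal illustrative sketch of the PCM-to-float conversion in the Audio feature
# above: 16-bit PCM samples become a float32 waveform after dividing by 32767.
# Assumes only numpy; the sample data below is synthetic.
import numpy as np
pcm_bytes = (np.sin(np.linspace(0, 3.14, 8)) * 32767).astype(np.int16).tobytes()
waveform = np.frombuffer(pcm_bytes, dtype=np.int16).astype(np.float32) / 32767
assert -1.0 <= waveform.min() and waveform.max() <= 1.0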
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A ( __UpperCAmelCase ) -> List[str]: # picklable for multiprocessing
'''simple docstring'''
return x.sum()
def A ( __UpperCAmelCase ) -> int: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@dataclass
class a_ :
UpperCamelCase__ : int
UpperCamelCase__ : str
class a_ ( _snake_case ):
def __a ( self :Dict) -> Dict:
UpperCAmelCase_ = {}
UpperCAmelCase_ = []
UpperCAmelCase_ = 1
UpperCAmelCase_ = [1, 2]
UpperCAmelCase_ = {'''a''': 1, '''b''': 2}
UpperCAmelCase_ = {'''a''': [1, 2], '''b''': [3, 4]}
UpperCAmelCase_ = {'''a''': {'''1''': 1}, '''b''': 2}
UpperCAmelCase_ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
UpperCAmelCase_ = {}
UpperCAmelCase_ = []
UpperCAmelCase_ = 2
UpperCAmelCase_ = [2, 3]
UpperCAmelCase_ = {'''a''': 2, '''b''': 3}
UpperCAmelCase_ = {'''a''': [2, 3], '''b''': [4, 5]}
UpperCAmelCase_ = {'''a''': {'''1''': 2}, '''b''': 3}
UpperCAmelCase_ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(_lowercase , _lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase) , _lowercase)
UpperCAmelCase_ = 2
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase) , _lowercase)
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase) , _lowercase)
UpperCAmelCase_ = {'''a''': np.eye(2), '''b''': np.zeros(3), '''c''': np.ones(2)}
UpperCAmelCase_ = {'''a''': 2, '''b''': 0, '''c''': 2}
UpperCAmelCase_ = {
'''a''': np.eye(2).astype(_lowercase),
'''b''': np.zeros(3).astype(_lowercase),
'''c''': np.ones(2).astype(_lowercase),
}
self.assertEqual(map_nested(_lowercase , _lowercase , map_numpy=_lowercase) , _lowercase)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowercase , _lowercase , map_numpy=_lowercase).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowercase , _lowercase , map_numpy=_lowercase , num_proc=_lowercase) , _lowercase)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowercase , _lowercase , map_numpy=_lowercase , num_proc=_lowercase).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowercase): # can't pickle a local lambda
map_nested(lambda _lowercase: x + 1 , _lowercase , num_proc=_lowercase)
def __a ( self :Tuple) -> Any:
UpperCAmelCase_ = {'''a''': 1, '''b''': 2}
UpperCAmelCase_ = {'''a''': 3, '''b''': 4}
UpperCAmelCase_ = {'''a''': 5, '''b''': 6}
UpperCAmelCase_ = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))])
self.assertEqual(sorted(zip_dict(_lowercase , _lowercase , _lowercase)) , _lowercase)
def __a ( self :Tuple) -> int:
class a_ :
UpperCamelCase__ : Tuple ="bar"
UpperCAmelCase_ = Foo()
self.assertEqual(foo.my_attr , '''bar''')
with temporary_assignment(_lowercase , '''my_attr''' , '''BAR'''):
self.assertEqual(foo.my_attr , '''BAR''')
self.assertEqual(foo.my_attr , '''bar''')
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
UpperCAmelCase_ = {f"{i}": i for i in range(__UpperCAmelCase )}
UpperCAmelCase_ = map_nested(lambda __UpperCAmelCase : x + 10 , __UpperCAmelCase , num_proc=__UpperCAmelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class a_ ( _snake_case ):
@require_tf
def __a ( self :List[Any]) -> List[Any]:
import tensorflow as tf
from tensorflow.keras import layers
UpperCAmelCase_ = layers.Dense(2)
def gen_random_output():
UpperCAmelCase_ = tf.random.uniform((1, 3))
return model(_lowercase).numpy()
with temp_seed(42 , set_tensorflow=_lowercase):
UpperCAmelCase_ = gen_random_output()
with temp_seed(42 , set_tensorflow=_lowercase):
UpperCAmelCase_ = gen_random_output()
UpperCAmelCase_ = gen_random_output()
np.testing.assert_equal(_lowercase , _lowercase)
self.assertGreater(np.abs(outa - outa).sum() , 0)
@require_torch
def __a ( self :str) -> Optional[Any]:
import torch
def gen_random_output():
UpperCAmelCase_ = torch.nn.Linear(3 , 2)
UpperCAmelCase_ = torch.rand(1 , 3)
return model(_lowercase).detach().numpy()
with temp_seed(42 , set_pytorch=_lowercase):
UpperCAmelCase_ = gen_random_output()
with temp_seed(42 , set_pytorch=_lowercase):
UpperCAmelCase_ = gen_random_output()
UpperCAmelCase_ = gen_random_output()
np.testing.assert_equal(_lowercase , _lowercase)
self.assertGreater(np.abs(outa - outa).sum() , 0)
def __a ( self :Any) -> Tuple:
def gen_random_output():
return np.random.rand(1 , 3)
with temp_seed(42):
UpperCAmelCase_ = gen_random_output()
with temp_seed(42):
UpperCAmelCase_ = gen_random_output()
UpperCAmelCase_ = gen_random_output()
np.testing.assert_equal(_lowercase , _lowercase)
self.assertGreater(np.abs(outa - outa).sum() , 0)
@pytest.mark.parametrize('''input_data''' , [{}] )
def A ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = NestedDataStructure(__UpperCAmelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = NestedDataStructure(__UpperCAmelCase ).flatten()
assert output == expected_output
def A ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = A(x=1 , y='''foobar''' )
UpperCAmelCase_ = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(__UpperCAmelCase ) == expected_output
UpperCAmelCase_ = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
UpperCAmelCase_ = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(__UpperCAmelCase ) == expected_output
with pytest.raises(__UpperCAmelCase ):
asdict([1, A(x=10 , y='''foo''' )] )
def A ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return text.split()
def A ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def A ( ) -> List[str]:
'''simple docstring'''
with Pool(2 ) as pool:
UpperCAmelCase_ = list(iflatmap_unordered(__UpperCAmelCase , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(__UpperCAmelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
UpperCAmelCase_ = list(iflatmap_unordered(__UpperCAmelCase , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(__UpperCAmelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
UpperCAmelCase_ = []
for yield_time, content in iflatmap_unordered(
__UpperCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(__UpperCAmelCase )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
    assert len(__UpperCAmelCase ) == 4
 | 353 |
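# A simplified sketch of the `map_nested` behavior exercised by the tests above: the
# function is applied to every leaf of a nested dict/list structure. The real helper
# also handles tuples, numpy arrays and multiprocessing; this stand-in does not.
def map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {k: map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, list):
        return [map_nested_sketch(fn, v) for v in data]
    return fn(data)  # leaf value
assert map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}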
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=_snake_case ):
UpperCamelCase__ : Any =["torch", "scipy"]
def __init__( self :List[str] , *_lowercase :List[str] , **_lowercase :Union[str, Any]) -> List[Any]:
requires_backends(self , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Dict , *_lowercase :Any , **_lowercase :Dict) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
@classmethod
def __a ( cls :Optional[Any] , *_lowercase :str , **_lowercase :Optional[Any]) -> Union[str, Any]:
requires_backends(cls , ['''torch''', '''scipy'''])
| 344 | 0 |
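# A sketch of the dummy-object pattern above: classes stay importable when optional
# backends are missing and fail with a clear ImportError on use. The helper below is
# a hypothetical stand-in for the real `requires_backends`.
def requires_backends_sketch(obj, backends, installed=frozenset()):
    missing = [b for b in backends if b not in installed]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")
try:
    requires_backends_sketch(type("ScipyThing", (), {}), ["torch", "scipy"])
except ImportError as err:
    assert "scipy" in str(err)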
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = "▁"
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
UpperCamelCase_ = {
"google/reformer-crime-and-punishment": 524_288,
}
class a_ ( _snake_case ):
UpperCamelCase__ : Tuple =VOCAB_FILES_NAMES
UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Any =["input_ids", "attention_mask"]
def __init__( self :List[str] , _lowercase :List[str] , _lowercase :List[Any]="</s>" , _lowercase :List[str]="<unk>" , _lowercase :Dict=[] , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Optional[int] , ) -> None:
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowercase , unk_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
@property
def __a ( self :Any) -> List[Any]:
return self.sp_model.get_piece_size()
def __a ( self :Any) -> Dict[str, int]:
UpperCAmelCase_ = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :List[str]) -> int:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Union[str, Any]) -> Dict:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __a ( self :List[str] , _lowercase :str) -> List[str]:
return self.sp_model.encode(_lowercase , out_type=_lowercase)
def __a ( self :Tuple , _lowercase :Tuple) -> Tuple:
return self.sp_model.piece_to_id(_lowercase)
def __a ( self :Optional[Any] , _lowercase :Tuple) -> List[str]:
if index < self.sp_model.get_piece_size():
UpperCAmelCase_ = self.sp_model.IdToPiece(_lowercase)
return token
def __a ( self :Any , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = []
UpperCAmelCase_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowercase) + token
UpperCAmelCase_ = []
else:
current_sub_tokens.append(_lowercase)
out_string += self.sp_model.decode(_lowercase)
return out_string.strip()
def __a ( self :Optional[Any] , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_lowercase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
UpperCAmelCase_ = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
| 354 |
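# A pure-Python sketch of the detokenization step in the SentencePiece tokenizer
# above: pieces are concatenated and the "\u2581" word-boundary marker becomes a
# space. This mirrors what sp_model.decode does only for simple inputs.
def pieces_to_text(pieces):
    return "".join(pieces).replace("\u2581", " ").strip()
assert pieces_to_text(["\u2581Hello", "\u2581wor", "ld"]) == "Hello world"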
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def A ( __UpperCAmelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for rt in rc.restypes:
UpperCAmelCase_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
UpperCAmelCase_ = {name: i for i, name in enumerate(__UpperCAmelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
    UpperCAmelCase_ = torch.tensor(
        __UpperCAmelCase , dtype=torch.int32 , device=protein['''aatype'''].device , )
    UpperCAmelCase_ = torch.tensor(
        __UpperCAmelCase , dtype=torch.int32 , device=protein['''aatype'''].device , )
    UpperCAmelCase_ = torch.tensor(
        __UpperCAmelCase , dtype=torch.float32 , device=protein['''aatype'''].device , )
UpperCAmelCase_ = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
UpperCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype]
UpperCAmelCase_ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
    UpperCAmelCase_ = torch.zeros([21, 37] , dtype=torch.float32 , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
UpperCAmelCase_ = rc.restype_atoa[restype_letter]
UpperCAmelCase_ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
UpperCAmelCase_ = rc.atom_order[atom_name]
UpperCAmelCase_ = 1
UpperCAmelCase_ = restype_atomaa_mask[protein_aatype]
UpperCAmelCase_ = residx_atomaa_mask
return protein
def A ( __UpperCAmelCase ) -> Dict[str, np.ndarray]:
'''simple docstring'''
UpperCAmelCase_ = tree_map(lambda __UpperCAmelCase : torch.tensor(__UpperCAmelCase , device=batch['''aatype'''].device ) , __UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = tensor_tree_map(lambda __UpperCAmelCase : np.array(__UpperCAmelCase ) , make_atomaa_masks(__UpperCAmelCase ) )
return out
| 344 | 0 |
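# The atom-mask code above leans on one indexing trick: indexing a per-residue-type
# lookup table with an `aatype` vector broadcasts per-type rows out to per-residue
# rows. A tiny numpy demonstration with made-up shapes:
import numpy as np
table = np.array([[0, 1, 2], [3, 4, 5]])  # 2 residue types x 3 atom slots
aatype = np.array([0, 1, 1, 0])           # one type id per residue
per_residue = table[aatype]               # shape (4, 3): one row per residue
assert per_residue.shape == (4, 3) and per_residue[1, 0] == 3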
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_snake_case )
class a_ ( _snake_case ):
UpperCamelCase__ : str =field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
UpperCamelCase__ : ClassVar[Features] =Features({"image": Image()} )
UpperCamelCase__ : ClassVar[Features] =Features({"labels": ClassLabel} )
UpperCamelCase__ : str ="image"
UpperCamelCase__ : str ="labels"
def __a ( self :Union[str, Any] , _lowercase :Any) -> Optional[int]:
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column] , _lowercase):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
UpperCAmelCase_ = copy.deepcopy(self)
UpperCAmelCase_ = self.label_schema.copy()
UpperCAmelCase_ = features[self.label_column]
UpperCAmelCase_ = label_schema
return task_template
@property
def __a ( self :Union[str, Any]) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
| 355 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="openai/whisper-base"
UpperCamelCase__ : int =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
UpperCamelCase__ : Any ="transcriber"
UpperCamelCase__ : Optional[int] =WhisperProcessor
UpperCamelCase__ : List[str] =WhisperForConditionalGeneration
UpperCamelCase__ : List[Any] =["audio"]
UpperCamelCase__ : Union[str, Any] =["text"]
def __a ( self :int , _lowercase :Any) -> Tuple:
return self.pre_processor(_lowercase , return_tensors='''pt''').input_features
def __a ( self :Dict , _lowercase :Tuple) -> Any:
return self.model.generate(inputs=_lowercase)
def __a ( self :int , _lowercase :Union[str, Any]) -> Optional[Any]:
return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=_lowercase)[0]
| 344 | 0 |
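# The transcriber tool above follows the three-stage PipelineTool contract:
# encode inputs, run the model forward, decode outputs. A toy, model-free sketch
# of that contract (EchoTool is hypothetical):
class EchoTool:
    def encode(self, text):
        return text.split()  # pre-processing
    def forward(self, tokens):
        return tokens  # "model" step; identity here
    def decode(self, tokens):
        return " ".join(tokens)  # post-processing
    def __call__(self, text):
        return self.decode(self.forward(self.encode(text)))
assert EchoTool()("hello world") == "hello world"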
from collections import deque
from .hash_table import HashTable
class a_ ( _snake_case ):
def __init__( self :Union[str, Any] , *_lowercase :Optional[int] , **_lowercase :List[str]) -> int:
super().__init__(*_lowercase , **_lowercase)
def __a ( self :Optional[int] , _lowercase :str , _lowercase :Optional[Any]) -> List[str]:
UpperCAmelCase_ = deque([]) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_lowercase)
UpperCAmelCase_ = self.values[key]
def __a ( self :Optional[Any]) -> List[Any]:
return (
sum(self.charge_factor - len(_lowercase) for slot in self.values)
/ self.size_table
* self.charge_factor
)
def __a ( self :Any , _lowercase :Union[str, Any] , _lowercase :int=None) -> Tuple:
if not (
len(self.values[key]) == self.charge_factor and self.values.count(_lowercase) == 0
):
return key
return super()._collision_resolution(_lowercase , _lowercase)
| 356 |
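# The hash table above resolves collisions by chaining values in a deque per slot.
# A minimal standalone illustration of that separate-chaining idea:
from collections import deque
buckets = {slot: deque() for slot in range(4)}
for value in (3, 7, 11):
    buckets[value % 4].appendleft(value)  # all three values collide at slot 3
assert list(buckets[3]) == [11, 7, 3]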
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 | 0 |
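# A sketch of the lazy-import idea behind `_LazyModule` above, expressed with the
# PEP 562 module-level __getattr__ hook instead of the real helper. The module path
# and symbol name here are hypothetical.
import importlib
_LAZY_SYMBOLS = {"OPTModel": ".modeling_opt"}
def __getattr__(name):
    if name in _LAZY_SYMBOLS:
        module = importlib.import_module(_LAZY_SYMBOLS[name], __package__)
        return getattr(module, name)
    raise AttributeError(name)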
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] ="mctct"
def __init__( self :Optional[Any] , _lowercase :int=8065 , _lowercase :str=1536 , _lowercase :Any=36 , _lowercase :Union[str, Any]=6144 , _lowercase :Optional[Any]=4 , _lowercase :str=384 , _lowercase :List[str]=920 , _lowercase :Optional[Any]=1E-5 , _lowercase :Tuple=0.3 , _lowercase :str="relu" , _lowercase :Any=0.02 , _lowercase :Optional[int]=0.3 , _lowercase :Dict=0.3 , _lowercase :List[str]=1 , _lowercase :str=0 , _lowercase :List[str]=2 , _lowercase :str=1 , _lowercase :Dict=0.3 , _lowercase :Optional[Any]=1 , _lowercase :Optional[Any]=(7,) , _lowercase :List[str]=(3,) , _lowercase :Tuple=80 , _lowercase :Any=1 , _lowercase :Dict=None , _lowercase :Optional[int]="sum" , _lowercase :List[str]=False , **_lowercase :Optional[int] , ) -> Optional[Any]:
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase)
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = layerdrop
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = pad_token_id
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = conv_glu_dim
UpperCAmelCase_ = conv_dropout
UpperCAmelCase_ = num_conv_layers
UpperCAmelCase_ = input_feat_per_channel
UpperCAmelCase_ = input_channels
UpperCAmelCase_ = conv_channels
UpperCAmelCase_ = ctc_loss_reduction
UpperCAmelCase_ = ctc_zero_infinity
        # prevent config tests from failing when exporting the config to JSON
UpperCAmelCase_ = list(_lowercase)
UpperCAmelCase_ = list(_lowercase)
if len(self.conv_kernel) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`.")
| 357 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : str =BigBirdTokenizer
UpperCamelCase__ : Tuple =BigBirdTokenizerFast
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : List[str] =True
def __a ( self :Any) -> List[str]:
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_lowercase , keep_accents=_lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __a ( self :Any) -> List[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 344 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class a_ ( datasets.BuilderConfig ):
UpperCamelCase__ : Optional[datasets.Features] =None
class a_ ( datasets.ArrowBasedBuilder ):
UpperCamelCase__ : Optional[int] =PandasConfig
def __a ( self :int) -> int:
return datasets.DatasetInfo(features=self.config.features)
def __a ( self :List[str] , _lowercase :Dict) -> Tuple:
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files)
if isinstance(_lowercase , (str, list, tuple)):
UpperCAmelCase_ = data_files
if isinstance(_lowercase , _lowercase):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(_lowercase) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(_lowercase , _lowercase):
UpperCAmelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ = [dl_manager.iter_files(_lowercase) for file in files]
splits.append(datasets.SplitGenerator(name=_lowercase , gen_kwargs={'''files''': files}))
return splits
def __a ( self :Optional[Any] , _lowercase :pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(_lowercase , self.config.features.arrow_schema)
return pa_table
def __a ( self :List[Any] , _lowercase :Union[str, Any]) -> Any:
for i, file in enumerate(itertools.chain.from_iterable(_lowercase)):
with open(_lowercase , '''rb''') as f:
UpperCAmelCase_ = pa.Table.from_pandas(pd.read_pickle(_lowercase))
yield i, self._cast_table(_lowercase)
| 358 |
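# The core conversion in the pandas builder above: a pickled pandas DataFrame is
# read back and turned into a pyarrow Table. A self-contained round trip, assuming
# pandas and pyarrow are installed:
import os
import tempfile
import pandas as pd
import pyarrow as pa
df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
with tempfile.TemporaryDirectory() as tmp_dir:
    pkl_path = os.path.join(tmp_dir, "example.pkl")
    df.to_pickle(pkl_path)
    table = pa.Table.from_pandas(pd.read_pickle(pkl_path))
assert table.num_rows == 2 and table.column_names == ["a", "b"]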
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase_ = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase_ = {ord(char) for char in VALID_CHARS}
UpperCamelCase_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> str | None:
'''simple docstring'''
UpperCAmelCase_ = ""
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
for keychar, cipherchar in zip(cycle(__UpperCAmelCase ) , __UpperCAmelCase ):
UpperCAmelCase_ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__UpperCAmelCase )
return decoded
def A ( __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
UpperCAmelCase_ = []
for key in product(__UpperCAmelCase , repeat=3 ):
UpperCAmelCase_ = try_key(__UpperCAmelCase , __UpperCAmelCase )
if encoded is not None:
possibles.append(__UpperCAmelCase )
return possibles
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def A ( __UpperCAmelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = Path(__UpperCAmelCase ).parent.joinpath(__UpperCAmelCase ).read_text(encoding='''utf-8''' )
UpperCAmelCase_ = [int(__UpperCAmelCase ) for number in data.strip().split(''',''' )]
UpperCAmelCase_ = filter_valid_chars(__UpperCAmelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase_ = filter_common_word(__UpperCAmelCase , __UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
break
UpperCAmelCase_ = possibles[0]
return sum(ord(__UpperCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
| 344 | 0 |
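# The solver above relies on XOR being its own inverse: applying the same repeating
# key twice restores the plaintext. A quick standalone check:
from itertools import cycle
plain = "the quick brown fox"
key = "god"
cipher = [ord(p) ^ ord(k) for p, k in zip(plain, cycle(key))]
assert "".join(chr(c ^ ord(k)) for c, k in zip(cipher, cycle(key))) == plain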
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case ):
UpperCamelCase__ : Tuple =["pixel_values"]
def __init__( self :List[str] , _lowercase :bool = True , _lowercase :Optional[Dict[str, int]] = None , _lowercase :PILImageResampling = PILImageResampling.BILINEAR , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = None , _lowercase :Optional[Union[float, List[float]]] = None , **_lowercase :Tuple , ) -> None:
super().__init__(**_lowercase)
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase)
UpperCAmelCase_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ = get_size_dict(_lowercase , param_name='''crop_size''')
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self :Any , _lowercase :np.ndarray , _lowercase :Dict[str, int] , _lowercase :PILImageResampling = PILImageResampling.BICUBIC , _lowercase :Optional[Union[str, ChannelDimension]] = None , **_lowercase :Union[str, Any] , ) -> np.ndarray:
UpperCAmelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase)
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
UpperCAmelCase_ = get_resize_output_image_size(_lowercase , size=size['''shortest_edge'''] , default_to_square=_lowercase)
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase)
def __a ( self :Optional[int] , _lowercase :np.ndarray , _lowercase :Dict[str, int] , _lowercase :Optional[Union[str, ChannelDimension]] = None , **_lowercase :Union[str, Any] , ) -> np.ndarray:
UpperCAmelCase_ = get_size_dict(_lowercase)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(_lowercase , size=(size['''height'''], size['''width''']) , data_format=_lowercase , **_lowercase)
def __a ( self :List[str] , _lowercase :np.ndarray , _lowercase :float , _lowercase :Optional[Union[str, ChannelDimension]] = None , **_lowercase :Optional[Any]) -> np.ndarray:
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase)
def __a ( self :Union[str, Any] , _lowercase :np.ndarray , _lowercase :Union[float, List[float]] , _lowercase :Union[float, List[float]] , _lowercase :Optional[Union[str, ChannelDimension]] = None , **_lowercase :int , ) -> np.ndarray:
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase)
def __a ( self :Optional[int] , _lowercase :ImageInput , _lowercase :Optional[bool] = None , _lowercase :Dict[str, int] = None , _lowercase :PILImageResampling = None , _lowercase :bool = None , _lowercase :Dict[str, int] = None , _lowercase :Optional[bool] = None , _lowercase :Optional[float] = None , _lowercase :Optional[bool] = None , _lowercase :Optional[Union[float, List[float]]] = None , _lowercase :Optional[Union[float, List[float]]] = None , _lowercase :Optional[Union[str, TensorType]] = None , _lowercase :Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase :Dict , ) -> int:
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase)
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(_lowercase , param_name='''crop_size''')
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = make_list_of_images(_lowercase)
if not valid_images(_lowercase):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_lowercase) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase) for image in images]
if do_center_crop:
UpperCAmelCase_ = [self.center_crop(image=_lowercase , size=_lowercase) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_lowercase , scale=_lowercase) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_lowercase , _lowercase) for image in images]
UpperCAmelCase_ = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase)
def __a ( self :Tuple , _lowercase :Optional[Any] , _lowercase :List[Tuple] = None) -> List[Any]:
UpperCAmelCase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase) != len(_lowercase):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''')
if is_torch_tensor(_lowercase):
UpperCAmelCase_ = target_sizes.numpy()
UpperCAmelCase_ = []
for idx in range(len(_lowercase)):
UpperCAmelCase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase)
UpperCAmelCase_ = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(_lowercase)
else:
UpperCAmelCase_ = logits.argmax(dim=1)
UpperCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 359 |
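# The preprocess method above chains rescale and normalize. A numpy-only sketch of
# those two steps on a dummy image (the mean/std values are illustrative, not the
# ImageNet defaults used above):
import numpy as np
image = np.random.randint(0, 256, size=(3, 8, 8)).astype(np.float32)
image = image * (1 / 255)    # rescale to [0, 1]
image = (image - 0.5) / 0.5  # normalize to roughly [-1, 1]
assert image.min() >= -1.0 and image.max() <= 1.0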
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def A ( ) -> str:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def A ( ) -> Any:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = dataset_loading_script_name
UpperCAmelCase_ = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=__UpperCAmelCase )
UpperCAmelCase_ = script_dir / f"{script_name}.py"
with open(__UpperCAmelCase , '''w''' ) as f:
f.write(__UpperCAmelCase )
return str(__UpperCAmelCase )
| 344 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
UpperCAmelCase_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
UpperCAmelCase_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase)
processor.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , **_lowercase :Dict) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).tokenizer
def __a ( self :Optional[Any] , **_lowercase :Optional[Any]) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).image_processor
def __a ( self :Dict , **_lowercase :Tuple) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).qformer_tokenizer
def __a ( self :Optional[int]) -> str:
shutil.rmtree(self.tmpdirname)
def __a ( self :Any) -> List[str]:
        UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
self.assertIsInstance(processor.qformer_tokenizer , _lowercase)
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase)
UpperCAmelCase_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 360 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class a_ ( _snake_case ):
UpperCamelCase__ : Dict ="open-llama"
def __init__( self :Union[str, Any] , _lowercase :List[Any]=100000 , _lowercase :Dict=4096 , _lowercase :List[Any]=11008 , _lowercase :Optional[int]=32 , _lowercase :Union[str, Any]=32 , _lowercase :List[str]="silu" , _lowercase :Union[str, Any]=2048 , _lowercase :Any=0.02 , _lowercase :Optional[Any]=1E-6 , _lowercase :str=True , _lowercase :str=0 , _lowercase :Any=1 , _lowercase :Optional[Any]=2 , _lowercase :str=False , _lowercase :Dict=True , _lowercase :Optional[Any]=0.1 , _lowercase :Tuple=0.1 , _lowercase :Dict=True , _lowercase :List[Any]=True , _lowercase :Dict=None , **_lowercase :Optional[int] , ) -> List[Any]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = rms_norm_eps
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = kwargs.pop(
'''use_memorry_efficient_attention''' , _lowercase)
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_dropout_prob
UpperCAmelCase_ = use_stable_embedding
UpperCAmelCase_ = shared_input_output_embedding
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase , )
def __a ( self :int) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowercase) or len(self.rope_scaling) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}")
UpperCAmelCase_ = self.rope_scaling.get('''type''' , _lowercase)
UpperCAmelCase_ = self.rope_scaling.get('''factor''' , _lowercase)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(_lowercase , _lowercase) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 344 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
@slow
def __a ( self :Any) -> Any:
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_lowercase)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _lowercase)
# compare the actual values for a slice of the last dimension
self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3))
@slow
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_lowercase)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _lowercase)
# compare the actual values for a slice of the last dimension
self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3))
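# A standalone sketch of the expected-slice assertion pattern used in both
# integration tests above: check the full output shape, then compare only the
# last embedding dimension against a small reference tensor within atol=1e-3.
import torch

output = torch.randn(1, 12, 768)
expected_slice = output[:, :, -1].clone()  # stand-in for hard-coded reference values
assert output.shape == torch.Size((1, 12, 768))
assert torch.allclose(output[:, :, -1], expected_slice, atol=1E-3)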
| 361 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] =(DPMSolverSinglestepScheduler,)
UpperCamelCase__ : Tuple =(("num_inference_steps", 25),)
def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_lowercase)
return config
def __a ( self :Union[str, Any] , _lowercase :List[Any]=0 , **_lowercase :Optional[int]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
def __a ( self :Optional[Any] , _lowercase :str=0 , **_lowercase :Union[str, Any]) -> Dict:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Dict , _lowercase :Union[str, Any]=None , **_lowercase :List[Any]) -> int:
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
# make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with the same config gives the same results
# for the default settings
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
assert sample.dtype == torch.floataa
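# A minimal, self-contained sketch of the denoising-loop pattern the tests
# above exercise: configure the timesteps once, then repeatedly feed a noise
# prediction to scheduler.step() and carry prev_sample forward (assumes the
# scheduler is constructible with its registered defaults; the random tensor
# stands in for a real model call).
import torch
from diffusers import DPMSolverSinglestepScheduler

sched = DPMSolverSinglestepScheduler()
sched.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in sched.timesteps:
    noise_pred = torch.randn_like(sample)  # stand-in for model(sample, t)
    sample = sched.step(noise_pred, t, sample).prev_sample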
| 344 | 0 |
import datasets
from .evaluate import evaluate
UpperCamelCase_ = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
UpperCamelCase_ = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
UpperCamelCase_ = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def __a ( self :Optional[int]) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string'''), '''prediction_text''': datasets.Value('''string''')},
'''references''': {
'''id''': datasets.Value('''string'''),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string'''),
'''answer_start''': datasets.Value('''int32'''),
}),
},
}) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
def __a ( self :str , _lowercase :Dict , _lowercase :Optional[int]) -> Dict:
UpperCAmelCase_ = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
UpperCAmelCase_ = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
UpperCAmelCase_ = evaluate(dataset=_lowercase , predictions=_lowercase)
return score
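# A pure-Python sketch of the reshaping done in the compute method above: flat
# {"id", "answers"} references are wrapped into the nested SQuAD v1 dataset
# layout ({"paragraphs": [{"qas": [...]}]}) that the official evaluate() script
# expects, while predictions become a simple id -> prediction_text mapping.
refs = [{"id": "56e10a3be3433e1400422b22", "answers": {"text": ["1976"], "answer_start": [97]}}]
dataset = [{"paragraphs": [{"qas": [
    {"id": r["id"], "answers": [{"text": t} for t in r["answers"]["text"]]}
    for r in refs
]}]}]
predictions = {"56e10a3be3433e1400422b22": "1976"}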
| 362 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a_ ( nn.Module ):
def __init__( self :Optional[Any]) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :Dict , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( _snake_case ):
def __a ( self :Tuple , _lowercase :Optional[int] , *_lowercase :Union[str, Any] , **_lowercase :Any) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class a_ ( _snake_case ):
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :Tuple) -> int:
return output + 1
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This offloads each submodule: its parameters are swapped out for meta tensors
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This offloads the whole model: parameters are swapped out for meta tensors
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This offloads the whole model, using its state_dict as the weights map
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
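# A minimal sketch of the CPU-offload pattern the tests above verify: attaching
# AlignDevicesHook with offload=True swaps the parameters out for meta tensors
# (the real weights live in an internal weights map), and removing the hook
# restores them to CPU.
import torch
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module

layer = nn.Linear(3, 4)
add_hook_to_module(layer, AlignDevicesHook(execution_device="cpu", offload=True))
assert layer.weight.device == torch.device("meta")
remove_hook_from_module(layer)
assert layer.weight.device == torch.device("cpu")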
| 344 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def A ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = None
if token is not None:
UpperCAmelCase_ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"Bearer {token}"}
UpperCAmelCase_ = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
UpperCAmelCase_ = requests.get(__UpperCAmelCase , headers=__UpperCAmelCase ).json()
UpperCAmelCase_ = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
UpperCAmelCase_ = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(__UpperCAmelCase ):
UpperCAmelCase_ = requests.get(url + f"&page={i + 2}" , headers=__UpperCAmelCase ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
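# A small sketch of the GitHub API pagination math used above: with
# per_page=100, the first request also returns total_count, and
# ceil((total_count - 100) / 100) extra pages are fetched starting at page 2.
import math

total_count = 250
extra_pages = math.ceil((total_count - 100) / 100)
assert extra_pages == 2  # i.e. pages 2 and 3 after the initial request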
def A ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = None
if token is not None:
UpperCAmelCase_ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"Bearer {token}"}
UpperCAmelCase_ = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
UpperCAmelCase_ = requests.get(__UpperCAmelCase , headers=__UpperCAmelCase ).json()
UpperCAmelCase_ = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
UpperCAmelCase_ = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(__UpperCAmelCase ):
UpperCAmelCase_ = requests.get(url + f"&page={i + 2}" , headers=__UpperCAmelCase ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = None
if token is not None:
UpperCAmelCase_ = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"Bearer {token}"}
UpperCAmelCase_ = requests.get(__UpperCAmelCase , headers=__UpperCAmelCase , allow_redirects=__UpperCAmelCase )
UpperCAmelCase_ = result.headers['''Location''']
UpperCAmelCase_ = requests.get(__UpperCAmelCase , allow_redirects=__UpperCAmelCase )
UpperCAmelCase_ = os.path.join(__UpperCAmelCase , f"{artifact_name}.zip" )
with open(__UpperCAmelCase , '''wb''' ) as fp:
fp.write(response.content )
def A ( __UpperCAmelCase , __UpperCAmelCase=None ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = None
with zipfile.ZipFile(__UpperCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(__UpperCAmelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__UpperCAmelCase ) as f:
for line in f:
UpperCAmelCase_ = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
UpperCAmelCase_ = line[: line.index(''': ''' )]
UpperCAmelCase_ = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip unrelated lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
UpperCAmelCase_ = line[len('''FAILED ''' ) :]
failed_tests.append(__UpperCAmelCase )
elif filename == "job_name.txt":
UpperCAmelCase_ = line
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__UpperCAmelCase )} for `errors` "
f"and {len(__UpperCAmelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
''' problem.''' )
UpperCAmelCase_ = None
if job_name and job_links:
UpperCAmelCase_ = job_links.get(__UpperCAmelCase , __UpperCAmelCase )
# A list with elements of the form (line of error, error, failed test)
UpperCAmelCase_ = [x + [y] + [job_link] for x, y in zip(__UpperCAmelCase , __UpperCAmelCase )]
return result
def A ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = [os.path.join(__UpperCAmelCase , __UpperCAmelCase ) for p in os.listdir(__UpperCAmelCase ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__UpperCAmelCase , job_links=__UpperCAmelCase ) )
return errors
def A ( __UpperCAmelCase , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
UpperCAmelCase_ = Counter()
counter.update([x[1] for x in logs] )
UpperCAmelCase_ = counter.most_common()
UpperCAmelCase_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
UpperCAmelCase_ = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
UpperCAmelCase_ = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=__UpperCAmelCase ) )
return r
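# A standalone sketch of the Counter-based reduction implemented above: count
# the error strings (x[1]) and emit a report ordered by frequency.
from collections import Counter

logs = [("line1", "ValueError", "test_a"), ("line2", "ValueError", "test_b"), ("line3", "KeyError", "test_c")]
counter = Counter(x[1] for x in logs)
report = {
    err: {"count": n, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == err]}
    for err, n in counter.most_common()
}
assert report["ValueError"]["count"] == 2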
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
UpperCAmelCase_ = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
UpperCAmelCase_ = test.split('''/''' )[2]
else:
UpperCAmelCase_ = None
return test
def A ( __UpperCAmelCase , __UpperCAmelCase=None ) -> int:
'''simple docstring'''
UpperCAmelCase_ = [(x[0], x[1], get_model(x[2] )) for x in logs]
UpperCAmelCase_ = [x for x in logs if x[2] is not None]
UpperCAmelCase_ = {x[2] for x in logs}
UpperCAmelCase_ = {}
for test in tests:
UpperCAmelCase_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
UpperCAmelCase_ = counter.most_common()
UpperCAmelCase_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
UpperCAmelCase_ = sum(error_counts.values() )
if n_errors > 0:
UpperCAmelCase_ = {'''count''': n_errors, '''errors''': error_counts}
UpperCAmelCase_ = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=__UpperCAmelCase ) )
return r
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
UpperCAmelCase_ = '''| no. | error | status |'''
UpperCAmelCase_ = '''|-:|:-|:-|'''
UpperCAmelCase_ = [header, sep]
for error in reduced_by_error:
UpperCAmelCase_ = reduced_by_error[error]['''count''']
UpperCAmelCase_ = f"| {count} | {error[:100]} | |"
lines.append(__UpperCAmelCase )
return "\n".join(__UpperCAmelCase )
def A ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = '''| model | no. of errors | major error | count |'''
UpperCAmelCase_ = '''|-:|-:|-:|-:|'''
UpperCAmelCase_ = [header, sep]
for model in reduced_by_model:
UpperCAmelCase_ = reduced_by_model[model]['''count''']
UpperCAmelCase_ , UpperCAmelCase_ = list(reduced_by_model[model]['''errors'''].items() )[0]
UpperCAmelCase_ = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__UpperCAmelCase )
return "\n".join(__UpperCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
UpperCamelCase_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCamelCase_ = get_job_links(args.workflow_run_id, token=args.token)
UpperCamelCase_ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCamelCase_ = k.find(" / ")
UpperCamelCase_ = k[index + len(" / ") :]
UpperCamelCase_ = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCamelCase_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCamelCase_ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCamelCase_ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCamelCase_ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCamelCase_ = reduce_by_error(errors)
UpperCamelCase_ = reduce_by_model(errors)
UpperCamelCase_ = make_github_table(reduced_by_error)
UpperCamelCase_ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 363 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
UpperCAmelCase_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
UpperCAmelCase_ = InstructBlipProcessor(_lowercase , _lowercase , _lowercase)
processor.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , **_lowercase :Dict) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).tokenizer
def __a ( self :Optional[Any] , **_lowercase :Optional[Any]) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).image_processor
def __a ( self :Dict , **_lowercase :Tuple) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase).qformer_tokenizer
def __a ( self :Optional[int]) -> str:
shutil.rmtree(self.tmpdirname)
def __a ( self :Any) -> List[str]:
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
self.assertIsInstance(processor.qformer_tokenizer , _lowercase)
def __a ( self :Dict) -> Any:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase)
UpperCAmelCase_ = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def __a ( self :Dict) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[int]) -> Optional[Any]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_qformer_tokenizer()
UpperCAmelCase_ = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase)
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
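# A minimal usage sketch of the processor contract asserted above, assuming the
# standard transformers class names and the tiny test checkpoints from setUp:
# one call tokenizes the text twice (LLM tokenizer + Q-Former tokenizer) and
# preprocesses the image, yielding exactly the five keys checked in these tests.
import numpy as np
from PIL import Image
from transformers import BertTokenizerFast, BlipImageProcessor, GPT2Tokenizer, InstructBlipProcessor

processor = InstructBlipProcessor(
    image_processor=BlipImageProcessor(),
    tokenizer=GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"),
    qformer_tokenizer=BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert"),
)
image = Image.fromarray(np.random.randint(0, 255, (30, 40, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image)
assert sorted(inputs.keys()) == sorted(
    ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"])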
| 344 | 0 |
from maths.prime_check import is_prime
def A ( __UpperCAmelCase ) -> int:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_ = f"Input value of [number={number}] must be an integer"
raise TypeError(__UpperCAmelCase )
if is_prime(__UpperCAmelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
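# Illustrative usage of the helper above: it returns number + 2 when both
# number and number + 2 are prime (a twin-prime pair), and -1 otherwise.
assert A(5) == 7   # 5 and 7 are twin primes
assert A(8) == -1  # 8 is not prime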
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[int] ="levit"
def __init__( self :List[str] , _lowercase :List[Any]=224 , _lowercase :str=3 , _lowercase :Optional[int]=3 , _lowercase :str=2 , _lowercase :List[Any]=1 , _lowercase :str=16 , _lowercase :Dict=[128, 256, 384] , _lowercase :Union[str, Any]=[4, 8, 12] , _lowercase :Tuple=[4, 4, 4] , _lowercase :Dict=[16, 16, 16] , _lowercase :Any=0 , _lowercase :Dict=[2, 2, 2] , _lowercase :Any=[2, 2, 2] , _lowercase :Tuple=0.02 , **_lowercase :Union[str, Any] , ) -> Optional[Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] =version.parse("1.11" )
@property
def __a ( self :Any) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def __a ( self :List[Any]) -> float:
return 1E-4
| 344 | 0 |
import gc
import threading
import time
import psutil
import torch
class a_ :
def __init__( self :Tuple) -> Union[str, Any]:
UpperCAmelCase_ = psutil.Process()
UpperCAmelCase_ = False
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = -1
while True:
UpperCAmelCase_ = max(self.process.memory_info().rss , self.cpu_memory_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def __a ( self :str) -> Dict:
UpperCAmelCase_ = True
UpperCAmelCase_ = threading.Thread(target=self.peak_monitor)
UpperCAmelCase_ = True
self.thread.start()
def __a ( self :Union[str, Any]) -> Dict:
UpperCAmelCase_ = False
self.thread.join()
return self.cpu_memory_peak
UpperCamelCase_ = PeakCPUMemory()
def A ( ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {'''time''': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase_ = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase_ = torch.cuda.memory_allocated(__UpperCAmelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def A ( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = {'''time''': time.time() - start_measures['''time''']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase_ = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
UpperCAmelCase_ = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase_ = (torch.cuda.memory_allocated(__UpperCAmelCase ) - start_measures[str(__UpperCAmelCase )]) / 2**20
UpperCAmelCase_ = (torch.cuda.max_memory_allocated(__UpperCAmelCase ) - start_measures[str(__UpperCAmelCase )]) / 2**20
return measures
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
print(f"{description}:" )
print(f"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(f"- GPU {i} allocated: {measures[str(__UpperCAmelCase )]:.2f}MiB" )
UpperCAmelCase_ = measures[f"{i}-peak"]
print(f"- GPU {i} peak: {peak:.2f}MiB" )
print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| 365 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=None , __UpperCAmelCase="no" , __UpperCAmelCase="29500" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCAmelCase_ = True
elif "IPython" in sys.modules:
UpperCAmelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCAmelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
UpperCAmelCase_ = 8
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''TPU''' )
print(f"Launching a training on {num_processes} TPU cores." )
xmp.spawn(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*__UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port=__UpperCAmelCase , mixed_precision=__UpperCAmelCase ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''MULTI_GPU''' )
print(f"Launching training on {num_processes} GPUs." )
try:
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCAmelCase_ = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*__UpperCAmelCase )
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=2 ) -> Optional[Any]:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , debug=__UpperCAmelCase )
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
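# A minimal usage sketch, assuming the public `accelerate.notebook_launcher`
# API that this module implements: run a training function across processes
# from inside a notebook, without a CLI launcher.
from accelerate import notebook_launcher

def train(lr):
    print(f"training with lr={lr}")

# num_processes=2 needs two GPUs; num_processes=1 simply runs in-process.
notebook_launcher(train, args=(1e-4,), num_processes=1)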
| 344 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class a_ ( unittest.TestCase ):
def __a ( self :Union[str, Any]) -> Optional[int]:
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='''utf-8''' , check=_lowercase , )
assert hasattr(self , '''env''')
def __a ( self :Tuple , _lowercase :Tuple=1) -> Dict:
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=_lowercase , instance_type=self.instance_type , debugger_hook_config=_lowercase , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def __a ( self :Union[str, Any] , _lowercase :Any) -> List[Any]:
TrainingJobAnalytics(_lowercase).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
def __a ( self :int) -> str:
# create estimator
UpperCAmelCase_ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase_ = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
UpperCAmelCase_ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''])
UpperCAmelCase_ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''])
# get train time from the SageMaker job; this includes starting, preprocessing and stopping
UpperCAmelCase_ = (
Session().describe_training_job(estimator.latest_training_job.name).get('''TrainingTimeInSeconds''' , 999999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy)
assert all(t <= self.results['''eval_loss'''] for t in eval_loss)
# dump test results into a json file to share in the PR
with open(f"{estimator.latest_training_job.name}.json" , '''w''') as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _lowercase)
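# A standalone sketch of the parameterized_class pattern used above: each dict
# in the list becomes class attributes on a generated copy of the test class,
# so one test body runs once per framework entry.
import unittest

from parameterized import parameterized_class

@parameterized_class([{"framework": "pytorch"}, {"framework": "tensorflow"}])
class FrameworkConfigTest(unittest.TestCase):
    def test_framework_attribute_is_set(self):
        self.assertIn(self.framework, ("pytorch", "tensorflow"))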
| 366 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( _snake_case ):
def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any) -> int:
self.run_seqaseq_quick(distributed=_lowercase)
@require_torch_multi_gpu
def __a ( self :int) -> Any:
self.run_seqaseq_quick(distributed=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time
# via a 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
# test a 2nd time - was getting 'eval_loss': nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def __a ( self :str , _lowercase :Any) -> List[str]:
# as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''])
UpperCAmelCase_ = len(re.findall(_lowercase , cl.err))
self.assertEqual(_lowercase , data['''n_matches'''])
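# (`CaptureStderr` above buffers everything written to stderr inside the `with`
# block and exposes it as `cl.err`, so `re.findall` can count how many of the
# distributed processes emitted the "Running training" line.)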
@slow
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(_lowercase)
UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :List[str]) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowercase :str) -> Tuple[int, float]:
UpperCAmelCase_ = '''--skip_memory_metrics 0'''
UpperCAmelCase_ = self.run_trainer(
max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
UpperCAmelCase_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes, and the diff in optim memory usage is derived like so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
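# (For reference: Adam keeps two fp32 states per parameter - momentum and
# variance - hence 2 * 4 = 8 bytes/param, while bnb's 8-bit optimizer stores the
# same two states in 1 byte each, i.e. 2 bytes/param.)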
UpperCAmelCase_ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
_lowercase , _lowercase , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def __a ( self :Any , _lowercase :int , _lowercase :str , _lowercase :int , _lowercase :float = 3E-3 , _lowercase :str = "adafactor" , _lowercase :bool = False , _lowercase :str = None , _lowercase :int = 0 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :int = None , ) -> List[Any]:
UpperCAmelCase_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowercase)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowercase)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
UpperCAmelCase_ = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowercase)}\n ".split()
UpperCAmelCase_ = '''
--do_predict
'''.split()
UpperCAmelCase_ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase_ = get_gpu_count()
UpperCAmelCase_ = get_torch_dist_unique_port()
UpperCAmelCase_ = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
UpperCAmelCase_ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowercase , env=self.get_env())
else:
UpperCAmelCase_ = ['''run_translation.py'''] + args
with patch.object(_lowercase , '''argv''' , _lowercase):
main()
return output_dir
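# A small, self-contained illustration of the `TrainerState` round-trip the tests
# above rely on (hedged: it assumes only the `save_to_json` / `load_from_json` /
# `log_history` API that is already used in this file):
def _trainer_state_roundtrip_sketch(tmp_dir):
    state = TrainerState()
    state.log_history = [{"step": 1, "eval_loss": 1.23}]
    path = os.path.join(tmp_dir, "trainer_state.json")
    state.save_to_json(path)
    assert TrainerState.load_from_json(path).log_history[0]["eval_loss"] == 1.23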
| 344 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a_ ( nn.Module ):
def __init__( self :Optional[Any]) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :Dict , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( _snake_case ):
def __a ( self :Tuple , _lowercase :Optional[int] , *_lowercase :Union[str, Any] , **_lowercase :Any) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class a_ ( _snake_case ):
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :Tuple) -> int:
return output + 1
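# A compact, runnable demo of the hook contract defined above (hedged sketch: it
# subclasses `ModelHook` with the same `pre_forward` signature as the pre-forward
# class right above, wrapped around a pass-through `nn.Identity` layer):
def _hook_contract_demo():
    class _AddOnePreHook(ModelHook):
        def pre_forward(self, module, *args, **kwargs):
            # bump the first positional input by 1 before the wrapped forward runs
            return (args[0] + 1,) + args[1:], kwargs

    layer = nn.Identity()
    add_hook_to_module(layer, _AddOnePreHook())
    assert torch.equal(layer(torch.zeros(2)), torch.ones(2))  # input bumped by 1
    remove_hook_from_module(layer)
    assert torch.equal(layer(torch.zeros(2)), torch.zeros(2))  # original behavior restored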
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# Attaching a hook to a model that already has one replaces it - it does not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# Attaching a hook to a model that already has one replaces it - it does not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule to a different device
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule to a different device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule to a different device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
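# A minimal sketch of the CPU-offload life-cycle exercised above (hedged: it only
# assumes the behavior the tests themselves assert - parameters land on the `meta`
# device while offloaded, and are restored once the hooks are removed):
def _offload_lifecycle_sketch():
    model = nn.Linear(3, 4)
    attach_align_device_hook(model, execution_device="cpu", offload=True)
    assert model.weight.device == torch.device("meta")  # weights offloaded
    remove_hook_from_submodules(model)
    assert model.weight.device == torch.device("cpu")  # weights loaded back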
| 367 |
import functools
def A ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''Return the Levenshtein edit distance between the two words, computed top-down with memoization.'''
UpperCAmelCase_ = len(__UpperCAmelCase )
UpperCAmelCase_ = len(__UpperCAmelCase )
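# functools.cache memoizes each (index1, index2) pair, turning the doubly
# recursive definition below into an O(len(word1) * len(word2)) dynamic program.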
@functools.cache
def min_distance(__UpperCAmelCase , __UpperCAmelCase ) -> int:
# if the first word's index overflows - delete all remaining letters of the second word
if indexa >= len_worda:
return len_worda - indexa
# if the second word's index overflows - delete all remaining letters of the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase_ = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , __UpperCAmelCase ) , 1 + min_distance(__UpperCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
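# Illustrative expectation (a classic textbook example, not taken from this
# source): the Levenshtein distance between "intention" and "execution" is 5.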
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :List[str] , _lowercase :List[str] , _lowercase :Optional[int]=7 , _lowercase :Optional[Any]=3 , _lowercase :str=18 , _lowercase :Optional[Any]=30 , _lowercase :Union[str, Any]=400 , _lowercase :Any=True , _lowercase :List[str]=None , _lowercase :Optional[Any]=True , ) -> Union[str, Any]:
UpperCAmelCase_ = size if size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = apply_ocr
def __a ( self :Union[str, Any]) -> Union[str, Any]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Any =LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __a ( self :Optional[int]) -> Tuple:
UpperCAmelCase_ = LayoutLMvaImageProcessingTester(self)
@property
def __a ( self :str) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''apply_ocr'''))
def __a ( self :str) -> Dict:
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18})
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
def __a ( self :Any) -> List[str]:
pass
def __a ( self :List[str]) -> List[str]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , _lowercase)
self.assertIsInstance(encoding.boxes , _lowercase)
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __a ( self :List[Any]) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __a ( self :Dict) -> Tuple:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __a ( self :List[Any]) -> List[Any]:
# with apply_ocr = True
UpperCAmelCase_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCAmelCase_ = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''')
UpperCAmelCase_ = Image.open(ds[0]['''file''']).convert('''RGB''')
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase_ = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCAmelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowercase)
self.assertListEqual(encoding.boxes , _lowercase)
# with apply_OCR = False
UpperCAmelCase_ = LayoutLMvaImageProcessor(apply_ocr=_lowercase)
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
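# A minimal usage sketch of the image processor exercised above (hedged: it assumes
# only behavior already asserted in this file - the default 224x224 resize, and that
# `apply_ocr=False` skips the Tesseract pass - plus PIL being available for `Image`):
def _layoutlmva_processor_sketch():
    image = Image.new("RGB", (600, 400))  # dummy 600x400 RGB image
    processor = LayoutLMvaImageProcessor(apply_ocr=False)
    encoding = processor(image, return_tensors="pt")
    assert encoding.pixel_values.shape == (1, 3, 224, 224)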
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
UpperCamelCase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Any ="left"
def __init__( self :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=False , _lowercase :Tuple="<s>" , _lowercase :Any="</s>" , _lowercase :Dict="<unk>" , _lowercase :str="<sep>" , _lowercase :Tuple="<pad>" , _lowercase :Any="<cls>" , _lowercase :List[str]="<mask>" , _lowercase :Union[str, Any]=["<eop>", "<eod>"] , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Union[str, Any] , ) -> None:
# Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
UpperCAmelCase_ = 3
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
@property
def __a ( self :int) -> List[Any]:
return len(self.sp_model)
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Optional[Any]) -> List[Any]:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __a ( self :List[str] , _lowercase :Tuple) -> Optional[int]:
if self.remove_space:
UpperCAmelCase_ = ''' '''.join(inputs.strip().split())
else:
UpperCAmelCase_ = inputs
UpperCAmelCase_ = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCAmelCase_ = unicodedata.normalize('''NFKD''' , _lowercase)
UpperCAmelCase_ = ''''''.join([c for c in outputs if not unicodedata.combining(_lowercase)])
if self.do_lower_case:
UpperCAmelCase_ = outputs.lower()
return outputs
def __a ( self :str , _lowercase :str) -> List[str]:
UpperCAmelCase_ = self.preprocess_text(_lowercase)
UpperCAmelCase_ = self.sp_model.encode(_lowercase , out_type=_lowercase)
UpperCAmelCase_ = []
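# SentencePiece can glue a trailing comma onto a number (e.g. "9,"), so the loop
# below re-tokenizes such pieces to keep the digits and the comma as separate
# pieces, mirroring the original XLNet preprocessing.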
for piece in pieces:
if len(_lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCAmelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCAmelCase_ = cur_pieces[1:]
else:
UpperCAmelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowercase)
else:
new_pieces.append(_lowercase)
return new_pieces
def __a ( self :Optional[Any] , _lowercase :Union[str, Any]) -> Tuple:
return self.sp_model.PieceToId(_lowercase)
def __a ( self :Optional[int] , _lowercase :Optional[Any]) -> List[str]:
return self.sp_model.IdToPiece(_lowercase)
def __a ( self :List[Any] , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = ''''''.join(_lowercase).replace(_lowercase , ''' ''').strip()
return out_string
def __a ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :bool = False , _lowercase :bool = None , _lowercase :bool = True , **_lowercase :Tuple , ) -> str:
UpperCAmelCase_ = kwargs.pop('''use_source_tokenizer''' , _lowercase)
UpperCAmelCase_ = self.convert_ids_to_tokens(_lowercase , skip_special_tokens=_lowercase)
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
UpperCAmelCase_ = []
sub_texts.append(_lowercase)
else:
current_sub_text.append(_lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCAmelCase_ = ''''''.join(_lowercase)
UpperCAmelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ = self.clean_up_tokenization(_lowercase)
return clean_text
else:
return text
def __a ( self :str , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self :Dict , _lowercase :List[int] , _lowercase :Optional[List[int]] = None , _lowercase :bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase)
if token_ids_a is not None:
return ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) + [1, 1]
return ([0] * len(_lowercase)) + [1, 1]
def __a ( self :Optional[int] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def __a ( self :str , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_lowercase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
UpperCAmelCase_ = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
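# Schematic illustration of the XLNet input layout built above (hedged: fake token
# ids, only the arithmetic of `build_inputs_with_special_tokens` and
# `create_token_type_ids_from_sequences` is shown - XLNet puts <sep>/<cls> at the END):
def _xlnet_layout_sketch():
    token_ids_a, token_ids_b = [1, 2], [3, 4]
    sep, cls, cls_segment_id = [9], [10], [2]
    assert token_ids_a + sep + cls == [1, 2, 9, 10]  # single sequence
    assert token_ids_a + sep + token_ids_b + sep + cls == [1, 2, 9, 3, 4, 9, 10]  # pair
    assert len(token_ids_a + sep) * [0] + cls_segment_id == [0, 0, 0, 2]  # segment ids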
| 344 | 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase_ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase_ = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase_ = "sshleifer/tiny-mbart"
@require_torch
class a_ ( _snake_case ):
def __a ( self :str , _lowercase :Any=False , _lowercase :Tuple=None , _lowercase :Dict=True , _lowercase :Tuple=True , _lowercase :List[Any]=True , _lowercase :List[str]=True , ) -> int:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
assert not math.isnan(float(last_step_stats['''eval_loss'''])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Dict) -> str:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any) -> int:
self.run_seqaseq_quick(distributed=_lowercase)
@require_torch_multi_gpu
def __a ( self :int) -> Any:
self.run_seqaseq_quick(distributed=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Tuple) -> List[str]:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''')
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any]) -> Any:
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase)
@unittest.skip('''Requires an update of the env running those tests''')
@require_torch_multi_gpu
@require_fairscale
def __a ( self :int) -> Any:
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase)
@require_apex
@require_torch_gpu
def __a ( self :Tuple) -> str:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time
# via a 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
# test a 2nd time - was getting 'eval_loss': nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''')
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''])
@require_torch_multi_gpu
def __a ( self :str , _lowercase :Any) -> List[str]:
# as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''])
UpperCAmelCase_ = len(re.findall(_lowercase , cl.err))
self.assertEqual(_lowercase , data['''n_matches'''])
@slow
def __a ( self :Any) -> Dict:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase)
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(_lowercase)
UpperCAmelCase_ = {os.path.basename(_lowercase) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :List[str]) -> str:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowercase :str) -> Tuple[int, float]:
UpperCAmelCase_ = '''--skip_memory_metrics 0'''
UpperCAmelCase_ = self.run_trainer(
max_len=128 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''')).log_history
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20)
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20)
UpperCAmelCase_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes, and the diff in optim memory usage is derived like so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
UpperCAmelCase_ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
_lowercase , _lowercase , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
def __a ( self :Any , _lowercase :int , _lowercase :str , _lowercase :int , _lowercase :float = 3E-3 , _lowercase :str = "adafactor" , _lowercase :bool = False , _lowercase :str = None , _lowercase :int = 0 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :bool = True , _lowercase :int = None , ) -> List[Any]:
UpperCAmelCase_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowercase)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowercase)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
UpperCAmelCase_ = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowercase)}\n ".split()
UpperCAmelCase_ = '''
--do_predict
'''.split()
UpperCAmelCase_ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase_ = get_gpu_count()
UpperCAmelCase_ = get_torch_dist_unique_port()
UpperCAmelCase_ = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
UpperCAmelCase_ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowercase , env=self.get_env())
else:
UpperCAmelCase_ = ['''run_translation.py'''] + args
with patch.object(_lowercase , '''argv''' , _lowercase):
main()
return output_dir
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case , _snake_case ):
UpperCamelCase__ : Union[str, Any] ="maskformer-swin"
UpperCamelCase__ : List[str] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :Union[str, Any] , _lowercase :Optional[int]=224 , _lowercase :List[str]=4 , _lowercase :Tuple=3 , _lowercase :List[Any]=96 , _lowercase :Any=[2, 2, 6, 2] , _lowercase :int=[3, 6, 12, 24] , _lowercase :List[Any]=7 , _lowercase :Dict=4.0 , _lowercase :Any=True , _lowercase :int=0.0 , _lowercase :List[Any]=0.0 , _lowercase :Tuple=0.1 , _lowercase :str="gelu" , _lowercase :Union[str, Any]=False , _lowercase :Tuple=0.02 , _lowercase :List[str]=1E-5 , _lowercase :List[str]=None , _lowercase :Any=None , **_lowercase :Any , ) -> Union[str, Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(_lowercase) - 1))
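        # e.g. with the defaults embed_dim=96 and depths=[2, 2, 6, 2] this gives 96 * 2 ** 3 = 768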
UpperCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_lowercase) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names)
| 344 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=None , __UpperCAmelCase="no" , __UpperCAmelCase="29500" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCAmelCase_ = True
elif "IPython" in sys.modules:
UpperCAmelCase_ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCAmelCase_ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
                '''your training function. Restart your notebook and make sure no cells initialize an '''
'''`Accelerator`.''' )
if num_processes is None:
UpperCAmelCase_ = 8
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''TPU''' )
print(f"Launching a training on {num_processes} TPU cores." )
xmp.spawn(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*__UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
                    '''inside your training function. Restart your notebook and make sure no cells initialize an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
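            # (concretely, `patch_environment` exports WORLD_SIZE, MASTER_ADDR and MASTER_PORT below;
            # per-process variables such as RANK/LOCAL_RANK are assumed to be filled in by the spawned workers)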
with patch_environment(
                world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port=__UpperCAmelCase , mixed_precision=__UpperCAmelCase ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , distributed_type='''MULTI_GPU''' )
print(f"Launching training on {num_processes} GPUs." )
try:
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCAmelCase_ = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*__UpperCAmelCase )
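# Minimal usage sketch (assuming this launcher is exposed as accelerate's `notebook_launcher`; the
# training function and its arguments below are illustrative):
#
#   def training_loop(learning_rate):
#       ...  # build the model, dataloaders and `Accelerator()` inside this function
#
#   notebook_launcher(training_loop, args=(3e-4,), num_processes=2, mixed_precision="fp16")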
def A ( __UpperCAmelCase , __UpperCAmelCase=() , __UpperCAmelCase=2 ) -> Optional[Any]:
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=__UpperCAmelCase , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
UpperCAmelCase_ = PrepareForLaunch(__UpperCAmelCase , debug=__UpperCAmelCase )
start_processes(__UpperCAmelCase , args=__UpperCAmelCase , nprocs=__UpperCAmelCase , start_method='''fork''' )
| 370 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase_ = False
UpperCamelCase_ = False
def A ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
return TrainCommand(__UpperCAmelCase )
class a_ ( _snake_case ):
@staticmethod
def __a ( _lowercase :ArgumentParser) -> List[Any]:
UpperCAmelCase_ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
train_parser.add_argument(
            '''--train_data''' , type=_lowercase , required=_lowercase , help='''path to the train (and optionally evaluation) dataset: a CSV with tab-separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''')
train_parser.add_argument(
'''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''')
train_parser.add_argument(
'''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''')
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''')
train_parser.add_argument(
            '''--validation_split''' , type=_lowercase , default=0.1 , help='''if no validation dataset is provided, the fraction of the train dataset to use as validation.''' , )
        train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path where the trained model will be saved.''')
train_parser.add_argument(
'''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''')
train_parser.add_argument(
'''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=32 , help='''Batch size for training.''')
train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=64 , help='''Batch size for validation.''')
train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3E-5 , help='''Learning rate.''')
train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1E-0_8 , help='''Epsilon for Adam optimizer.''')
train_parser.set_defaults(func=_lowercase)
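    # Example invocation of the subcommand registered above (the file paths are hypothetical):
    #   transformers-cli train --train_data ./train.csv --column_label 0 --column_text 1 \
    #       --task text_classification --model bert-base-uncased --output ./trained_model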
def __init__( self :Union[str, Any] , _lowercase :Namespace) -> Union[str, Any]:
UpperCAmelCase_ = logging.get_logger('''transformers-cli/training''')
UpperCAmelCase_ = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_lowercase)
UpperCAmelCase_ = args.output
UpperCAmelCase_ = args.column_label
UpperCAmelCase_ = args.column_text
UpperCAmelCase_ = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
UpperCAmelCase_ = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = args.validation_split
UpperCAmelCase_ = args.train_batch_size
UpperCAmelCase_ = args.valid_batch_size
UpperCAmelCase_ = args.learning_rate
UpperCAmelCase_ = args.adam_epsilon
def __a ( self :int) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :Optional[Any]) -> Any:
raise NotImplementedError
def __a ( self :int) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
| 344 | 0 |
def A ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
UpperCAmelCase_ = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
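# e.g. is_isogram("machine") -> True, is_isogram("Hello") -> False (repeated "l"),
# and is_isogram("abc1") raises ValueError (non-alphabetic character)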
if __name__ == "__main__":
UpperCamelCase_ = input("Enter a string ").strip()
UpperCamelCase_ = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 371 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :Tuple , _lowercase :List[Any] , _lowercase :bool = True , _lowercase :Dict[str, int] = None , _lowercase :int = 32 , _lowercase :bool = True , _lowercase :Union[int, float] = 1 / 255 , _lowercase :bool = True , _lowercase :bool = True , _lowercase :Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _lowercase :Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _lowercase :bool = True , _lowercase :List[Any]=7 , _lowercase :Dict=30 , _lowercase :Optional[int]=400 , _lowercase :Any=3 , ) -> Any:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def __a ( self :str) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self :List[Any] , _lowercase :Tuple , _lowercase :List[str]=False) -> int:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(_lowercase , Image.Image):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(_lowercase , _lowercase)
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size)
if max(_lowercase , _lowercase) > max_size:
UpperCAmelCase_ = max_size / max(_lowercase , _lowercase)
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5), int(neww + 0.5)
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[0])[0]
UpperCAmelCase_ = max(_lowercase , key=lambda _lowercase: item[1])[1]
return expected_height, expected_width
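    # Worked example (tester defaults: shortest_edge=288, size_divisor=32): a 480x640 input scales
    # by 288/480 = 0.6 to 288x384; max_size = int(1333 / 800 * 288) = 479 >= 384, so no second
    # rescale is applied, and flooring both sides to multiples of 32 leaves 288x384.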
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Tuple =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self :int) -> Dict:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self)
@property
def __a ( self :Dict) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''size_divisor'''))
def __a ( self :Union[str, Any]) -> Tuple:
pass
def __a ( self :List[str]) -> Tuple:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :Union[str, Any]) -> Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self :str) -> int:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Any = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] ) -> Dict:
"""simple docstring"""
a : str = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a : Dict = 128
elif "12-12" in model_name:
a : List[Any] = 12
a : Tuple = 12
elif "14-14" in model_name:
a : Any = 14
a : str = 14
elif "16-16" in model_name:
a : int = 16
a : Any = 16
else:
raise ValueError('Model not supported' )
a : Optional[int] = 'huggingface/label-files'
if "speech-commands" in model_name:
a : int = 35
a : List[str] = 'speech-commands-v2-id2label.json'
else:
a : Optional[int] = 527
a : int = 'audioset-id2label.json'
a : Tuple = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
    a : Tuple = {int(k ): v for k, v in idalabel.items()}
a : Union[str, Any] = idalabel
a : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> Dict:
"""simple docstring"""
if "module.v" in name:
a : Dict = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
a : Tuple = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
a : Tuple = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
a : Dict = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
a : str = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
a : List[Any] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
a : Tuple = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Any = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : int = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Optional[int] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : List[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Optional[int] = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a : Optional[int] = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
a : List[str] = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
a : int = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
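# e.g. applying the rules above to "module.v.blocks.0.attn.proj.weight" yields
# "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"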
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : str = orig_state_dict.pop(snake_case )
if "qkv" in key:
a : Union[str, Any] = key.split('.' )
a : Dict = int(key_split[3] )
a : Optional[int] = config.hidden_size
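            # the fused qkv weight has shape (3 * hidden_size, hidden_size): rows [:dim] hold the
            # query projection, [dim : 2 * dim] the key, and [-dim:] the value (same layout for the biases)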
if "weight" in key:
a : int = val[:dim, :]
a : Tuple = val[dim : dim * 2, :]
a : Optional[int] = val[-dim:, :]
else:
a : Dict = val[:dim]
a : List[Any] = val[dim : dim * 2]
a : str = val[-dim:]
else:
a : Any = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> str:
"""simple docstring"""
a : List[str] = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : Optional[int] , snake_case : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
a : Optional[int] = get_audio_spectrogram_transformer_config(snake_case )
a : int = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
a : Union[str, Any] = model_name_to_url[model_name]
a : Dict = torch.hub.load_state_dict_from_url(snake_case , map_location='cpu' )
# remove some keys
remove_keys(snake_case )
# rename some keys
a : Optional[int] = convert_state_dict(snake_case , snake_case )
# load 🤗 model
a : Dict = ASTForAudioClassification(snake_case )
model.eval()
model.load_state_dict(snake_case )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a : Optional[int] = -4.2_67_73_93 if 'speech-commands' not in model_name else -6.84_59_78
a : Dict = 4.5_68_99_74 if 'speech-commands' not in model_name else 5.5_65_45_26
a : str = 1_024 if 'speech-commands' not in model_name else 128
a : Dict = ASTFeatureExtractor(mean=snake_case , std=snake_case , max_length=snake_case )
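    # (per the original AST recipe, the extractor normalizes each log-mel spectrogram roughly as
    # (x - mean) / (2 * std), so these per-dataset statistics are what make inference match training)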
if "speech-commands" in model_name:
a : List[Any] = load_dataset('speech_commands' , 'v0.02' , split='validation' )
a : str = dataset[0]['audio']['array']
else:
a : List[str] = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
        a , a = torchaudio.load(snake_case )
a : Union[str, Any] = waveform.squeeze().numpy()
a : Union[str, Any] = feature_extractor(snake_case , sampling_rate=16_000 , return_tensors='pt' )
# forward pass
a : List[Any] = model(**snake_case )
a : Tuple = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a : Any = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a : Any = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a : Optional[Any] = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a : Optional[Any] = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a : Union[str, Any] = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a : int = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a : int = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
a : Tuple = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , snake_case , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(snake_case )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F"""MIT/{model_name}""" )
feature_extractor.push_to_hub(F"""MIT/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCamelCase : int = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 345 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase : Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer.
    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer.
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve.
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text.
    q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="auto" , UpperCAmelCase_ : Any=-1 , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : int=5_0_0 , UpperCAmelCase_ : int="gpt2-large" , UpperCAmelCase_ : Tuple=-1 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : List[str]=2_5 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2_5 , ):
"""simple docstring"""
a : List[str] = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
| 345 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Any = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
def SCREAMING_SNAKE_CASE__ ( *snake_case : str , **snake_case : List[str] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Union[str, Any] , **snake_case : List[Any] ) -> str:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Union[str, Any] , **snake_case : Dict ) -> Any:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : List[Any] , **snake_case : int ) -> List[Any]:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Optional[Any] , **snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Optional[Any] , **snake_case : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Any , **snake_case : List[str] ) -> str:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : str = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : str = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["torch"]
def __init__( self : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : str = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
| 345 | '''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum value in nums[left:right + 1] using divide and conquer."""
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle index
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max
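
# Quick illustrative check (example values assumed here, not taken from the original module):
# find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7) splits into ranges [0, 3] and [4, 7] and returns 9.
# Every element is inspected once, so the running time is O(n) with O(log n) recursion depth.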
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 345 | 1 |
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test using 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
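
# Sanity check with assumed example values (not part of the original module):
# rabin_miller(97) returns True, while rabin_miller(91) (= 7 * 13) is rejected with
# probability at least 1 - (1 / 4) ** 5, since each random witness exposes a composite
# with probability >= 3/4.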
def is_prime_low_num(num: int) -> bool:
    """Screen against a table of small primes, then fall back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1_024) -> int:
    """Draw random keysize-bit numbers until one passes the primality checks."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print(('Prime number:', num))
    print(('is_prime_low_num:', is_prime_low_num(num)))
| 345 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run inference over the dataloader and gather logits/targets across processes."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
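
# `accelerator.gather_for_metrics` is the key call above: it all-gathers each process's
# tensors and drops the duplicate samples that a distributed sampler pads onto the final
# batch, so the concatenated logits/targets line up exactly with the underlying dataset.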
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), F"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"""
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
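
# The two passes above must agree exactly: the baseline loop sees every validation sample
# once on a single device, while the distributed loop relies on `gather_for_metrics` to
# strip duplicated padding samples before `metric.add_batch`, so accuracy and F1 match.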
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 345 | 1 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class UpperCamelCase ( BaseImageProcessor ):
    """Image processor that resizes, center-crops, rescales and normalizes images."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['shortest_edge'])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
        return resize(
            image, size=(size_dict['height'], size_dict['width']), resample=resample, data_format=data_format, **kwargs)
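    # Worked example for the "shortest_edge" branch above: with size = {'shortest_edge': 224}
    # the short side is resized to int((256 / 224) * 224) = 256, and the later 224x224 center
    # crop reproduces the classic "resize to 256, crop to 224" ImageNet evaluation transform.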
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess an image or a batch of images for the model."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 345 | '''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( DiffusionPipeline ):
    """Pipeline that denoises a mel spectrogram image and converts it back to audio."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        """Default number of inference steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        """Generate (or regenerate) a mel spectrogram image and the corresponding audio."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device, )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype='uint8').reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)['sample']
            else:
                model_output = self.unet(images, t)['sample']
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator, )['prev_sample']
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator, )['prev_sample']
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)['sample']
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype('uint8')
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode='RGB').convert('L') for _ in images))
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50):
        """Reverse the DDIM sampling process to map images back to their starting noise."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype='uint8').reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)['sample']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
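    # Each iteration above inverts one DDIM update: subtract the predicted "direction to the
    # sample" term, rescale by the previous alpha product, then re-apply the model output,
    # walking a clean spectrogram back toward the isotropic noise that would generate it.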
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float):
        """Spherical linear interpolation between two tensors, treated as flat vectors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
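    # At alpha = 0 the second term vanishes (sin(0) = 0) and slerp returns x0; at alpha = 1 it
    # returns x1. Intermediate values trace the great circle between the two flattened
    # tensors, the usual way to interpolate starting noise between two diffusion outputs.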
| 345 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1_1_0_3)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_1_0_3)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_pegasus_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6_1_0_3
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 1_0_3
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_0_2_4
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
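        # Per the asserts above: ids 0-104 are reserved (pad, eos, the two mask tokens and the
        # unused <unk_token_N> slots), raw SentencePiece ids are shifted by offset = 103, and
        # the underlying <unk> piece (SentencePiece id 2) therefore surfaces as id 105.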
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1_5_0, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 1_0_2_4)
        assert batch.attention_mask.shape == (2, 1_0_2_4)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration(self):
        # fmt: off
a : Tuple = {'input_ids': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a, model_name='google/bigbird-pegasus-large-arxiv', revision='ba85d0851d708441f91440d509690f1ab6353415', )  # `a` is the encoding literal defined above
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1_0_0_0, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 4_0_9_6)
        assert batch.attention_mask.shape == (2, 4_0_9_6)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids, [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1], )
| 345 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_0,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
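        # With the defaults above this gives (30 // 2) ** 2 = 225 patches, so seq_length = 226.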
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=3_7)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_interpolate_pos_encoding(self):
        model = ViTModel.from_pretrained('facebook/dino-vits8').to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8', size=4_8_0)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the last hidden state
        expected_shape = torch.Size((1, 3_6_0_1, 3_8_4))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
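        # Shape sanity check: dino-vits8 uses 8x8 patches with hidden size 384, so a 480x480
        # input yields (480 // 8) ** 2 = 3600 patch tokens plus one [CLS] token = 3601.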
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problem."""
        model = ViTModel.from_pretrained('facebook/dino-vits8', torch_dtype=torch.float16, device_map='auto')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 345 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the list of (old, new) parameter names mapping timm weights to HF weights."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit') else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
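
# timm stores attention as a single fused projection of shape (3 * hidden_size, hidden_size);
# the slices above peel off rows [0, h), [h, 2h) and [2h, 3h) for query, key and value.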
def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Load an image of two cats on which we will verify our results."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original timm weights into our DeiT structure."""
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
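# A minimal usage sketch (the dump folder is a hypothetical path; running this
# downloads the timm weights and a COCO verification image):
#
#   python <this script> --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled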
| 345 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")
    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits
    def decode(self, outputs):
        # threshold the raw CLIPSeg logits into a binary mask and render it as a PIL image
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
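# A minimal usage sketch (the image path is hypothetical; the CLIPSeg checkpoint is
# fetched on first use, and this assumes the tools runtime is available):
#
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cats.jpg"), label="cat")
#   mask.save("cats_mask.png")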
| 345 | 1 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.
    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5])
    5.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
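# How the expected tokens arise (a walk-through, not extra test code): "react" starts
# as the characters "r e a c t</w>"; only the "r e" merge applies, giving the subwords
# "re@@ a@@ c@@ t". "readapt" additionally triggers "a d", "a p", "ap t</w>" and
# "ad apt</w>", collapsing to "re@@ adapt", which matches the expected BPE output above.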
| 345 | 1 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-audio-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
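# A quick sketch of using the config (values shown are the defaults defined above):
#
#   config = Data2VecAudioConfig(vocab_size=32, hidden_size=768)
#   config.inputs_to_logits_ratio  # 320 == math.prod((5, 2, 2, 2, 2, 2, 2))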
| 345 | '''simple docstring'''
def different_signs(num_a: int, num_b: int) -> bool:
    """
    Return True if the two integers have opposite signs (checked via the XOR sign bit).
    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    >>> different_signs(-2, -10)
    False
    """
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    hf_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            hf_state_dict[f"luke.{key}"] = state_dict[key]
        else:
            hf_state_dict[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(hf_state_dict, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
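# A usage sketch (every path below is a hypothetical placeholder for the original
# mLUKE release files):
#
#   python <this script> --checkpoint_path mluke.bin --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.jsonl --pytorch_dump_folder_path ./mluke-base \
#       --model_size base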
| 345 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
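# A short usage sketch (`image` stands for any PIL image you have at hand; with the
# defaults above the output batch has shape (1, 3, 224, 224)):
#
#   processor = LevitImageProcessor()
#   batch = processor(images=image, return_tensors="pt")
#   batch["pixel_values"].shape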
| 345 | 1 |